// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>

#include "dma-iommu.h"

#include "iommu-sva.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			"(set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				"(set via kernel command line)" : "");

	nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
		nb[i].notifier_call = iommu_bus_notifier;
		bus_register_notifier(iommu_buses[i], &nb[i]);
	}

	return 0;
}
subsys_initcall(iommu_subsys_init);

static int remove_iommu_group(struct device *dev, void *data)
{
	if (dev->iommu && dev->iommu->iommu_dev == data)
		iommu_release_device(dev);

	return 0;
}

/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops:   IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */
int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops, struct device *hwdev)
{
	int err = 0;

	/* We need to be able to take module references appropriately */
	if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
		return -EINVAL;
	/*
	 * Temporarily enforce global restriction to a single driver. This was
	 * already the de-facto behaviour, since any possible combination of
	 * existing drivers would compete for at least the PCI or platform bus.
	 */
	if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
		return -EBUSY;

	iommu->ops = ops;
	if (hwdev)
		iommu->fwnode = dev_fwnode(hwdev);

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
		iommu_buses[i]->iommu_ops = ops;
		err = bus_iommu_probe(iommu_buses[i]);
	}
	if (err)
		iommu_device_unregister(iommu);
	return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
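
/*
 * Example (illustrative only, not part of the upstream file): a typical
 * driver registers its instance from its probe routine. The "my_" names
 * and the surrounding driver structure are hypothetical.
 *
 *	static int my_iommu_probe(struct platform_device *pdev)
 *	{
 *		struct my_iommu *mi = my_iommu_alloc(pdev);
 *		int ret;
 *
 *		ret = iommu_device_sysfs_add(&mi->iommu, &pdev->dev,
 *					     NULL, "%s", dev_name(&pdev->dev));
 *		if (ret)
 *			return ret;
 *
 *		ret = iommu_device_register(&mi->iommu, &my_iommu_ops,
 *					    &pdev->dev);
 *		if (ret)
 *			iommu_device_sysfs_remove(&mi->iommu);
 *		return ret;
 *	}
 */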

void iommu_device_unregister(struct iommu_device *iommu)
{
	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}
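
/*
 * Worked example for dev_iommu_get_max_pasids() (illustrative numbers):
 * a platform device with a "pasid-num-bits" property of 5 yields
 * 1UL << 5 = 32 PASIDs, which min_t() then clamps to the IOMMU
 * instance's own max_pasids; PCI devices take the pci_max_pasids()
 * path instead.
 */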

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	static DEFINE_MUTEX(iommu_probe_device_lock);
	int ret;

	if (!ops)
		return -ENODEV;
	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
	 */
	mutex_lock(&iommu_probe_device_lock);
	if (!dev_iommu_get(dev)) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;
	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}

	mutex_lock(&group->mutex);
	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	mutex_unlock(&iommu_probe_device_lock);
	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	if (ops->release_device)
		ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

err_unlock:
	mutex_unlock(&iommu_probe_device_lock);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver. There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
	mutex_lock(&group->mutex);
	iommu_alloc_default_domain(group, dev);

	/*
	 * If device joined an existing group which has been claimed, don't
	 * attach the default domain.
	 */
	if (group->default_domain && !group->owner) {
		ret = __iommu_attach_device(group->default_domain, dev);
		if (ret) {
			mutex_unlock(&group->mutex);
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);

err_out:
	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops;

	if (!dev->iommu)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	ops = dev_iommu_ops(dev);
	if (ops->release_device)
		ops->release_device(dev);

	iommu_group_remove_device(dev);
	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);
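
/*
 * Example usage (kernel command line, matching the two early_param()
 * hooks above): booting with "iommu.passthrough=1" makes identity
 * (passthrough) the default domain type, while "iommu.strict=0" selects
 * the lazy TLB invalidation policy for DMA domains.
 */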

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}
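
/*
 * Worked example for iommu_insert_resv_region() (illustrative values):
 * inserting a "direct" region [0x1000, 0x2fff] into a list that already
 * holds a "direct" region [0x2000, 0x3fff] first sorts the new element
 * in front, then merges the overlap into a single [0x1000, 0x3fff]
 * entry; regions of other types are moved to the stack untouched.
 */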

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!device->dev->iommu)
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
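
/*
 * Example (illustrative sketch): a consumer such as VFIO can use
 * iommu_get_group_resv_regions() to learn which IOVA ranges to avoid;
 * the returned entries are allocated for the caller and must be freed.
 * my_reserve_iova() below is hypothetical:
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *resv, *tmp;
 *
 *	iommu_get_group_resv_regions(group, &resv_regions);
 *	list_for_each_entry_safe(resv, tmp, &resv_regions, list) {
 *		my_reserve_iova(resv->start, resv->length);
 *		list_del(&resv->list);
 *		kfree(resv);
 *	}
 */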

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ\n";
			break;
		}
	}
	mutex_unlock(&group->mutex);
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	if (group->blocking_domain)
		iommu_domain_free(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
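
/*
 * Example (illustrative): a driver's ->device_group() callback either
 * shares an existing group or allocates a fresh one. A minimal sketch,
 * where my_shares_translation() and my_shared_group are hypothetical:
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		if (my_shares_translation(dev))
 *			return iommu_group_ref_get(my_shared_group);
 *		return iommu_group_alloc();
 *	}
 */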

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || !iommu_is_dma_domain(domain))
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot);
				if (ret)
					goto out;
				map_size = 0;
			}
		}
	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

static bool iommu_is_attach_deferred(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->is_attach_deferred)
		return ops->is_attach_deferred(dev);

	return false;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain && !iommu_is_attach_deferred(dev))
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
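
/*
 * Example (illustrative): counting the devices in a group with a caller
 * supplied callback; the callback runs with group->mutex held:
 *
 *	static int my_count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// a non-zero return stops the walk
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, my_count_dev);
 */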

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented; otherwise NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the
 * following response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
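
/*
 * Example (illustrative sketch): a consumer registers a handler and, for
 * recoverable page requests, completes them via iommu_page_response().
 * The "my_" names are hypothetical and error handling is elided:
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct device *dev = data;
 *		struct iommu_page_response resp = {
 *			.version	= IOMMU_PAGE_RESP_VERSION_1,
 *			.grpid		= fault->prm.grpid,
 *			.code		= IOMMU_PAGE_RESP_SUCCESS,
 *		};
 *
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *			return -EOPNOTSUPP;
 *		// ... service the request, e.g. pin or map the page ...
 *		return iommu_page_response(dev, &resp);
 *	}
 *
 *	iommu_register_device_fault_handler(dev, my_fault_handler, dev);
 */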

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at
 * PCIe devices, and therefore only expect multiple slots on the root
 * complex or downstream switch ports).  It's conceivable though that a
 * pair of multifunction devices could have aliases between them that
 * would cause a loop.  To prevent this, we use a bitmap to track where
 * we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);
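
/*
 * Example (illustrative): a driver that serves several bus types usually
 * dispatches in its ->device_group() callback, e.g.:
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		if (dev_is_pci(dev))
 *			return pci_device_group(dev);
 *		if (dev_is_fsl_mc(dev))
 *			return fsl_mc_device_group(dev);
 *		return generic_device_group(dev);
 *	}
 */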

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;

	if (ops->def_domain_type)
		return ops->def_domain_type(dev);

	return 0;
}

static int iommu_group_alloc_default_domain(struct bus_type *bus,
					    struct iommu_group *group,
					    unsigned int type)
{
	struct iommu_domain *dom;

	dom = __iommu_domain_alloc(bus, type);
	if (!dom && type != IOMMU_DOMAIN_DMA) {
		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
		if (dom)
			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
				type, group->name);
	}

	if (!dom)
		return -ENOMEM;

	group->default_domain = dom;
	if (!group->domain)
		group->domain = dom;
	return 0;
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev)
{
	unsigned int type;

	if (group->default_domain)
		return 0;

	type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;

	return iommu_group_alloc_default_domain(dev->bus, group, type);
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	struct iommu_group *group;
	int ret;

	/* Device is probed already if in a group */
	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

struct __group_domain_type {
	struct device *dev;
	unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
	struct __group_domain_type *gtype = data;
	unsigned int type = iommu_get_def_domain_type(dev);

	if (type) {
		if (gtype->type && gtype->type != type) {
			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				 iommu_domain_type_str(type),
				 dev_name(gtype->dev),
				 iommu_domain_type_str(gtype->type));
			gtype->type = 0;
		}

		if (!gtype->dev) {
			gtype->dev  = dev;
			gtype->type = type;
		}
	}

	return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
				       struct iommu_group *group)
{
	struct __group_domain_type gtype;

	memset(&gtype, 0, sizeof(gtype));

	/* Ask for default domain requirements of all devices in the group */
	__iommu_group_for_each_dev(group, &gtype,
				   probe_get_default_domain_type);

	if (!gtype.type)
		gtype.type = iommu_def_domain_type;

	iommu_group_alloc_default_domain(bus, group, gtype.type);
}

static int iommu_group_do_dma_attach(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	int ret = 0;

	if (!iommu_is_attach_deferred(dev))
		ret = __iommu_attach_device(domain, dev);

	return ret;
}

static int __iommu_group_dma_attach(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group->default_domain,
					  iommu_group_do_dma_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}

static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
	struct iommu_group *group = data;

	iommu_create_device_direct_mappings(group, dev);

	return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group,
					  iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		/* Try to allocate default domain */
		probe_alloc_default_domain(bus, group);

		if (!group->default_domain) {
			mutex_unlock(&group->mutex);
			continue;
		}

		iommu_group_create_direct_mappings(group);

		ret = __iommu_group_dma_attach(group);

		mutex_unlock(&group->mutex);

		if (ret)
			break;

		__iommu_group_dma_finalize(group);
	}

	return ret;
}

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	const struct iommu_ops *ops;

	if (!dev->iommu || !dev->iommu->iommu_dev)
		return false;

	ops = dev_iommu_ops(dev);
	if (!ops->capable)
		return false;

	return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
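
/*
 * Example (illustrative): a user such as VFIO can gate a feature on a
 * capability check, e.g. whether DMA through the IOMMU is coherent with
 * the CPU caches:
 *
 *	bool coherent = device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY);
 */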

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
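
/*
 * Example (illustrative): the handler receives the faulting domain,
 * device, IOVA and fault flags; a non-zero return such as -ENOSYS tells
 * the reporting driver the fault was not handled:
 *
 *	static int my_fault(struct iommu_domain *dom, struct device *dev,
 *			    unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "unexpected fault at IOVA %lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault, NULL);
 */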
1922
1923static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1924						 unsigned type)
1925{
1926	struct iommu_domain *domain;
1927
1928	if (bus == NULL || bus->iommu_ops == NULL)
1929		return NULL;
1930
1931	domain = bus->iommu_ops->domain_alloc(type);
1932	if (!domain)
1933		return NULL;
1934
1935	domain->type = type;
1936	/* Assume all sizes by default; the driver may override this later */
1937	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
1938	if (!domain->ops)
1939		domain->ops = bus->iommu_ops->default_domain_ops;
1940
1941	if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
1942		iommu_domain_free(domain);
1943		domain = NULL;
1944	}
1945	return domain;
1946}
1947
1948struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1949{
1950	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1951}
1952EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1953
1954void iommu_domain_free(struct iommu_domain *domain)
1955{
1956	if (domain->type == IOMMU_DOMAIN_SVA)
1957		mmdrop(domain->mm);
1958	iommu_put_dma_cookie(domain);
1959	domain->ops->free(domain);
1960}
1961EXPORT_SYMBOL_GPL(iommu_domain_free);
1962
1963/*
1964 * Put the group's domain back to the appropriate core-owned domain - either the
1965 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
1966 */
1967static void __iommu_group_set_core_domain(struct iommu_group *group)
1968{
1969	struct iommu_domain *new_domain;
1970	int ret;
1971
1972	if (group->owner)
1973		new_domain = group->blocking_domain;
1974	else
1975		new_domain = group->default_domain;
1976
1977	ret = __iommu_group_set_domain(group, new_domain);
1978	WARN(ret, "iommu driver failed to attach the default/blocking domain");
1979}
1980
1981static int __iommu_attach_device(struct iommu_domain *domain,
1982				 struct device *dev)
1983{
1984	int ret;
1985
1986	if (unlikely(domain->ops->attach_dev == NULL))
1987		return -ENODEV;
1988
1989	ret = domain->ops->attach_dev(domain, dev);
1990	if (!ret)
1991		trace_attach_device_to_domain(dev);
1992	return ret;
1993}
1994
1995/**
1996 * iommu_attach_device - Attach an IOMMU domain to a device
1997 * @domain: IOMMU domain to attach
1998 * @dev: Device that will be attached
1999 *
2000 * Returns 0 on success and error code on failure
2001 *
2002 * Note that EINVAL can be treated as a soft failure, indicating
2003 * that a certain configuration of the domain is incompatible with
2004 * the device. In this case attaching a different domain to the
2005 * device may succeed.
2006 */
2007int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
2008{
2009	struct iommu_group *group;
2010	int ret;
2011
2012	group = iommu_group_get(dev);
2013	if (!group)
2014		return -ENODEV;
2015
2016	/*
2017	 * Lock the group to make sure the device-count doesn't
2018	 * change while we are attaching
2019	 */
2020	mutex_lock(&group->mutex);
2021	ret = -EINVAL;
2022	if (iommu_group_device_count(group) != 1)
2023		goto out_unlock;
2024
2025	ret = __iommu_attach_group(domain, group);
2026
2027out_unlock:
2028	mutex_unlock(&group->mutex);
2029	iommu_group_put(group);
2030
2031	return ret;
2032}
2033EXPORT_SYMBOL_GPL(iommu_attach_device);
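
/*
 * Illustrative sketch (added example, not upstream code): the typical
 * lifecycle for a single-device group whose driver owns an unmanaged
 * domain. Error handling is abbreviated.
 */
static int my_setup_translation(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);	/* IOMMU_DOMAIN_UNMANAGED */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		return ret;
	}

	/* ... iommu_map()/iommu_unmap() against the domain ... */

	iommu_detach_device(domain, dev);
	iommu_domain_free(domain);
	return 0;
}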
2034
2035int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
2036{
2037	if (iommu_is_attach_deferred(dev))
2038		return __iommu_attach_device(domain, dev);
2039
2040	return 0;
2041}
2042
2043static void __iommu_detach_device(struct iommu_domain *domain,
2044				  struct device *dev)
2045{
2046	if (iommu_is_attach_deferred(dev))
2047		return;
2048
2049	domain->ops->detach_dev(domain, dev);
2050	trace_detach_device_from_domain(dev);
2051}
2052
2053void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2054{
2055	struct iommu_group *group;
2056
2057	group = iommu_group_get(dev);
2058	if (!group)
2059		return;
2060
2061	mutex_lock(&group->mutex);
2062	if (WARN_ON(domain != group->domain) ||
2063	    WARN_ON(iommu_group_device_count(group) != 1))
2064		goto out_unlock;
2065	__iommu_group_set_core_domain(group);
2066
2067out_unlock:
2068	mutex_unlock(&group->mutex);
2069	iommu_group_put(group);
2070}
2071EXPORT_SYMBOL_GPL(iommu_detach_device);
2072
2073struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2074{
2075	struct iommu_domain *domain;
2076	struct iommu_group *group;
2077
2078	group = iommu_group_get(dev);
2079	if (!group)
2080		return NULL;
2081
2082	domain = group->domain;
2083
2084	iommu_group_put(group);
2085
2086	return domain;
2087}
2088EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2089
2090/*
2091 * Fast-path variant of iommu_get_domain_for_dev() for IOMMU_DOMAIN_DMA callers,
2092 * which already guarantee that the group and its default domain are valid and correct.
2093 */
2094struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2095{
2096	return dev->iommu_group->default_domain;
2097}
2098
2099/*
2100 * IOMMU groups are really the natural working unit of the IOMMU, but
2101 * the IOMMU API works on domains and devices.  Bridge that gap by
2102 * iterating over the devices in a group.  Ideally we'd have a single
2103 * device which represents the requestor ID of the group, but we also
2104 * allow IOMMU drivers to create policy defined minimum sets, where
2105 * the physical hardware may be able to distinguish members, but we
2106 * wish to group them at a higher level (ex. untrusted multi-function
2107 * PCI devices).  Thus we attach each device.
2108 */
2109static int iommu_group_do_attach_device(struct device *dev, void *data)
2110{
2111	struct iommu_domain *domain = data;
2112
2113	return __iommu_attach_device(domain, dev);
2114}
2115
2116static int __iommu_attach_group(struct iommu_domain *domain,
2117				struct iommu_group *group)
2118{
2119	int ret;
2120
2121	if (group->domain && group->domain != group->default_domain &&
2122	    group->domain != group->blocking_domain)
2123		return -EBUSY;
2124
2125	ret = __iommu_group_for_each_dev(group, domain,
2126					 iommu_group_do_attach_device);
2127	if (ret == 0)
2128		group->domain = domain;
2129
2130	return ret;
2131}
2132
2133/**
2134 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
2135 * @domain: IOMMU domain to attach
2136 * @group: IOMMU group that will be attached
2137 *
2138 * Returns 0 on success and error code on failure
2139 *
2140 * Note that EINVAL can be treated as a soft failure, indicating
2141 * that a certain configuration of the domain is incompatible with
2142 * the group. In this case attaching a different domain to the
2143 * group may succeed.
2144 */
2145int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2146{
2147	int ret;
2148
2149	mutex_lock(&group->mutex);
2150	ret = __iommu_attach_group(domain, group);
2151	mutex_unlock(&group->mutex);
2152
2153	return ret;
2154}
2155EXPORT_SYMBOL_GPL(iommu_attach_group);
2156
2157static int iommu_group_do_detach_device(struct device *dev, void *data)
2158{
2159	struct iommu_domain *domain = data;
2160
2161	__iommu_detach_device(domain, dev);
2162
2163	return 0;
2164}
2165
2166static int __iommu_group_set_domain(struct iommu_group *group,
2167				    struct iommu_domain *new_domain)
2168{
2169	int ret;
2170
2171	if (group->domain == new_domain)
2172		return 0;
2173
2174	/*
2175	 * New drivers should support default domains and so the detach_dev() op
2176	 * will never be called. Otherwise the NULL domain represents some
2177	 * platform specific behavior.
2178	 */
2179	if (!new_domain) {
2180		if (WARN_ON(!group->domain->ops->detach_dev))
2181			return -EINVAL;
2182		__iommu_group_for_each_dev(group, group->domain,
2183					   iommu_group_do_detach_device);
2184		group->domain = NULL;
2185		return 0;
2186	}
2187
2188	/*
2189	 * Changing the domain is done by calling attach_dev() on the new
2190	 * domain. This switch does not have to be atomic and DMA can be
2191	 * discarded during the transition. DMA must only be able to access
2192	 * either new_domain or group->domain, never something else.
2193	 *
2194	 * Note that this is called in error-unwind paths: attaching to a
2195	 * domain that has already been attached cannot fail.
2196	 */
2197	ret = __iommu_group_for_each_dev(group, new_domain,
2198					 iommu_group_do_attach_device);
2199	if (ret)
2200		return ret;
2201	group->domain = new_domain;
2202	return 0;
2203}
2204
2205void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2206{
2207	mutex_lock(&group->mutex);
2208	__iommu_group_set_core_domain(group);
2209	mutex_unlock(&group->mutex);
2210}
2211EXPORT_SYMBOL_GPL(iommu_detach_group);
2212
2213phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2214{
2215	if (domain->type == IOMMU_DOMAIN_IDENTITY)
2216		return iova;
2217
2218	if (domain->type == IOMMU_DOMAIN_BLOCKED)
2219		return 0;
2220
2221	return domain->ops->iova_to_phys(domain, iova);
2222}
2223EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2224
2225static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
2226			   phys_addr_t paddr, size_t size, size_t *count)
2227{
2228	unsigned int pgsize_idx, pgsize_idx_next;
2229	unsigned long pgsizes;
2230	size_t offset, pgsize, pgsize_next;
2231	unsigned long addr_merge = paddr | iova;
2232
2233	/* Page sizes supported by the hardware and small enough for @size */
2234	pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
2235
2236	/* Constrain the page sizes further based on the maximum alignment */
2237	if (likely(addr_merge))
2238		pgsizes &= GENMASK(__ffs(addr_merge), 0);
2239
2240	/* Make sure we have at least one suitable page size */
2241	BUG_ON(!pgsizes);
2242
2243	/* Pick the biggest page size remaining */
2244	pgsize_idx = __fls(pgsizes);
2245	pgsize = BIT(pgsize_idx);
2246	if (!count)
2247		return pgsize;
2248
2249	/* Find the next biggest supported page size, if it exists */
2250	pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
2251	if (!pgsizes)
2252		goto out_set_count;
2253
2254	pgsize_idx_next = __ffs(pgsizes);
2255	pgsize_next = BIT(pgsize_idx_next);
2256
2257	/*
2258	 * There's no point trying a bigger page size unless the virtual
2259	 * and physical addresses are similarly offset within the larger page.
2260	 */
2261	if ((iova ^ paddr) & (pgsize_next - 1))
2262		goto out_set_count;
2263
2264	/* Calculate the offset to the next page size alignment boundary */
2265	offset = pgsize_next - (addr_merge & (pgsize_next - 1));
2266
2267	/*
2268	 * If size is big enough to accommodate the larger page, reduce
2269	 * the number of smaller pages.
2270	 */
2271	if (offset + pgsize_next <= size)
2272		size = offset;
2273
2274out_set_count:
2275	*count = size >> pgsize_idx;
2276	return pgsize;
2277}
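
/*
 * Worked example for iommu_pgsize() above (illustrative numbers): with
 * pgsize_bitmap = SZ_4K | SZ_2M, iova = 0x201000, paddr = 0x1001000 and
 * size = 0x400000, the low bits of addr_merge limit the choice to 4K,
 * while the next 2M boundary lies 0x1ff000 bytes ahead. The function
 * returns pgsize = SZ_4K with *count = 511, so the caller maps 511 small
 * pages up to that boundary and may then switch to 2M mappings.
 */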
2278
2279static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
2280			     phys_addr_t paddr, size_t size, int prot,
2281			     gfp_t gfp, size_t *mapped)
2282{
2283	const struct iommu_domain_ops *ops = domain->ops;
2284	size_t pgsize, count;
2285	int ret;
2286
2287	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
2288
2289	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
2290		 iova, &paddr, pgsize, count);
2291
2292	if (ops->map_pages) {
2293		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
2294				     gfp, mapped);
2295	} else {
2296		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2297		*mapped = ret ? 0 : pgsize;
2298	}
2299
2300	return ret;
2301}
2302
2303static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2304		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2305{
2306	const struct iommu_domain_ops *ops = domain->ops;
2307	unsigned long orig_iova = iova;
2308	unsigned int min_pagesz;
2309	size_t orig_size = size;
2310	phys_addr_t orig_paddr = paddr;
2311	int ret = 0;
2312
2313	if (unlikely(!(ops->map || ops->map_pages) ||
2314		     domain->pgsize_bitmap == 0UL))
2315		return -ENODEV;
2316
2317	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2318		return -EINVAL;
2319
2320	/* find out the minimum page size supported */
2321	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2322
2323	/*
2324	 * both the virtual address and the physical one, as well as
2325	 * the size of the mapping, must be aligned (at least) to the
2326	 * size of the smallest page supported by the hardware
2327	 */
2328	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2329		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2330		       iova, &paddr, size, min_pagesz);
2331		return -EINVAL;
2332	}
2333
2334	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2335
2336	while (size) {
2337		size_t mapped = 0;
2338
2339		ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
2340					&mapped);
2341		/*
2342		 * Some pages may have been mapped, even if an error occurred,
2343		 * so we should account for those so they can be unmapped.
2344		 */
2345		size -= mapped;
2346
2347		if (ret)
2348			break;
2349
2350		iova += mapped;
2351		paddr += mapped;
2352	}
2353
2354	/* unroll mapping in case something went wrong */
2355	if (ret)
2356		iommu_unmap(domain, orig_iova, orig_size - size);
2357	else
2358		trace_map(orig_iova, orig_paddr, orig_size);
2359
2360	return ret;
2361}
2362
2363static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
2364		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2365{
2366	const struct iommu_domain_ops *ops = domain->ops;
2367	int ret;
2368
2369	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2370	if (ret == 0 && ops->iotlb_sync_map)
2371		ops->iotlb_sync_map(domain, iova, size);
2372
2373	return ret;
2374}
2375
2376int iommu_map(struct iommu_domain *domain, unsigned long iova,
2377	      phys_addr_t paddr, size_t size, int prot)
2378{
2379	might_sleep();
2380	return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2381}
2382EXPORT_SYMBOL_GPL(iommu_map);
2383
2384int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2385	      phys_addr_t paddr, size_t size, int prot)
2386{
2387	return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2388}
2389EXPORT_SYMBOL_GPL(iommu_map_atomic);
2390
2391static size_t __iommu_unmap_pages(struct iommu_domain *domain,
2392				  unsigned long iova, size_t size,
2393				  struct iommu_iotlb_gather *iotlb_gather)
2394{
2395	const struct iommu_domain_ops *ops = domain->ops;
2396	size_t pgsize, count;
2397
2398	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
2399	return ops->unmap_pages ?
2400	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
2401	       ops->unmap(domain, iova, pgsize, iotlb_gather);
2402}
2403
2404static size_t __iommu_unmap(struct iommu_domain *domain,
2405			    unsigned long iova, size_t size,
2406			    struct iommu_iotlb_gather *iotlb_gather)
2407{
2408	const struct iommu_domain_ops *ops = domain->ops;
2409	size_t unmapped_page, unmapped = 0;
2410	unsigned long orig_iova = iova;
2411	unsigned int min_pagesz;
2412
2413	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
2414		     domain->pgsize_bitmap == 0UL))
2415		return 0;
2416
2417	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2418		return 0;
2419
2420	/* find out the minimum page size supported */
2421	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2422
2423	/*
2424	 * The virtual address, as well as the size of the mapping, must be
2425	 * aligned (at least) to the size of the smallest page supported
2426	 * by the hardware
2427	 */
2428	if (!IS_ALIGNED(iova | size, min_pagesz)) {
2429		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2430		       iova, size, min_pagesz);
2431		return 0;
2432	}
2433
2434	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2435
2436	/*
2437	 * Keep iterating until we either unmap 'size' bytes (or more)
2438	 * or we hit an area that isn't mapped.
2439	 */
2440	while (unmapped < size) {
2441		unmapped_page = __iommu_unmap_pages(domain, iova,
2442						    size - unmapped,
2443						    iotlb_gather);
2444		if (!unmapped_page)
2445			break;
2446
2447		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2448			 iova, unmapped_page);
2449
2450		iova += unmapped_page;
2451		unmapped += unmapped_page;
2452	}
2453
2454	trace_unmap(orig_iova, size, unmapped);
2455	return unmapped;
2456}
2457
2458size_t iommu_unmap(struct iommu_domain *domain,
2459		   unsigned long iova, size_t size)
2460{
2461	struct iommu_iotlb_gather iotlb_gather;
2462	size_t ret;
2463
2464	iommu_iotlb_gather_init(&iotlb_gather);
2465	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2466	iommu_iotlb_sync(domain, &iotlb_gather);
2467
2468	return ret;
2469}
2470EXPORT_SYMBOL_GPL(iommu_unmap);
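
/*
 * Illustrative sketch (added example, not upstream code): a minimal
 * map/unmap round trip on a paging domain. @iova, @paddr and @size are
 * assumed to satisfy the minimum page size alignment enforced above.
 */
static int my_map_buffer(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size)
{
	int ret;

	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... device DMA to [iova, iova + size) happens here ... */

	if (iommu_unmap(domain, iova, size) != size)
		return -EIO;	/* partial unmap; illustrative handling */
	return 0;
}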
2471
2472size_t iommu_unmap_fast(struct iommu_domain *domain,
2473			unsigned long iova, size_t size,
2474			struct iommu_iotlb_gather *iotlb_gather)
2475{
2476	return __iommu_unmap(domain, iova, size, iotlb_gather);
2477}
2478EXPORT_SYMBOL_GPL(iommu_unmap_fast);
2479
2480static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2481		struct scatterlist *sg, unsigned int nents, int prot,
2482		gfp_t gfp)
2483{
2484	const struct iommu_domain_ops *ops = domain->ops;
2485	size_t len = 0, mapped = 0;
2486	phys_addr_t start;
2487	unsigned int i = 0;
2488	int ret;
2489
2490	while (i <= nents) {
2491		phys_addr_t s_phys = sg_phys(sg);
2492
2493		if (len && s_phys != start + len) {
2494			ret = __iommu_map(domain, iova + mapped, start,
2495					len, prot, gfp);
2496
2497			if (ret)
2498				goto out_err;
2499
2500			mapped += len;
2501			len = 0;
2502		}
2503
2504		if (sg_is_dma_bus_address(sg))
2505			goto next;
2506
2507		if (len) {
2508			len += sg->length;
2509		} else {
2510			len = sg->length;
2511			start = s_phys;
2512		}
2513
2514next:
2515		if (++i < nents)
2516			sg = sg_next(sg);
2517	}
2518
2519	if (ops->iotlb_sync_map)
2520		ops->iotlb_sync_map(domain, iova, mapped);
2521	return mapped;
2522
2523out_err:
2524	/* undo mappings already done */
2525	iommu_unmap(domain, iova, mapped);
2526
2527	return ret;
2528}
2529
2530ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2531		     struct scatterlist *sg, unsigned int nents, int prot)
2532{
2533	might_sleep();
2534	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2535}
2536EXPORT_SYMBOL_GPL(iommu_map_sg);
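
/*
 * Illustrative sketch (added example, not upstream code): mapping an
 * sg_table and treating a short map as an error. @total_len is assumed
 * to be the summed length of the table's segments.
 */
static int my_map_sgtable(struct iommu_domain *domain, unsigned long iova,
			  struct sg_table *sgt, size_t total_len)
{
	ssize_t mapped;

	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (mapped < 0)
		return mapped;
	if (mapped < total_len) {
		iommu_unmap(domain, iova, mapped);
		return -EINVAL;
	}
	return 0;
}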
2537
2538ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2539		    struct scatterlist *sg, unsigned int nents, int prot)
2540{
2541	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2542}
2543
2544/**
2545 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2546 * @domain: the iommu domain where the fault has happened
2547 * @dev: the device where the fault has happened
2548 * @iova: the faulting address
2549 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2550 *
2551 * This function should be called by the low-level IOMMU implementations
2552 * whenever IOMMU faults happen, to allow high-level users that are
2553 * interested in such events to know about them.
2554 *
2555 * This event may be useful for several possible use cases:
2556 * - mere logging of the event
2557 * - dynamic TLB/PTE loading
2558 * - restarting the faulting device, if required
2559 *
2560 * Returns 0 on success and an appropriate error code otherwise (if dynamic
2561 * PTE/TLB loading will one day be supported, implementations will be able
2562 * to tell whether it succeeded or not according to this return value).
2563 *
2564 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2565 * (though fault handlers can also return -ENOSYS, in case they want to
2566 * elicit the default behavior of the IOMMU drivers).
2567 */
2568int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2569		       unsigned long iova, int flags)
2570{
2571	int ret = -ENOSYS;
2572
2573	/*
2574	 * if upper layers showed interest and installed a fault handler,
2575	 * invoke it.
2576	 */
2577	if (domain->handler)
2578		ret = domain->handler(domain, dev, iova, flags,
2579						domain->handler_token);
2580
2581	trace_io_page_fault(dev, iova, flags);
2582	return ret;
2583}
2584EXPORT_SYMBOL_GPL(report_iommu_fault);
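
/*
 * Illustrative sketch (added example, not upstream code): an IOMMU
 * driver's IRQ handler can forward faults via report_iommu_fault() and
 * fall back to logging when no handler claims the event. struct my_iommu
 * and my_read_fault_addr() are hypothetical driver-side names.
 */
static irqreturn_t my_iommu_irq(int irq, void *cookie)
{
	struct my_iommu *iommu = cookie;
	unsigned long iova = my_read_fault_addr(iommu);	/* assumed helper */

	if (report_iommu_fault(iommu->domain, iommu->dev, iova, IOMMU_FAULT_READ))
		dev_err_ratelimited(iommu->dev, "unhandled fault at 0x%lx\n", iova);

	return IRQ_HANDLED;
}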
2585
2586static int __init iommu_init(void)
2587{
2588	iommu_group_kset = kset_create_and_add("iommu_groups",
2589					       NULL, kernel_kobj);
2590	BUG_ON(!iommu_group_kset);
2591
2592	iommu_debugfs_setup();
2593
2594	return 0;
2595}
2596core_initcall(iommu_init);
2597
2598int iommu_enable_nesting(struct iommu_domain *domain)
2599{
2600	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2601		return -EINVAL;
2602	if (!domain->ops->enable_nesting)
2603		return -EINVAL;
2604	return domain->ops->enable_nesting(domain);
2605}
2606EXPORT_SYMBOL_GPL(iommu_enable_nesting);
2607
2608int iommu_set_pgtable_quirks(struct iommu_domain *domain,
2609		unsigned long quirk)
2610{
2611	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2612		return -EINVAL;
2613	if (!domain->ops->set_pgtable_quirks)
2614		return -EINVAL;
2615	return domain->ops->set_pgtable_quirks(domain, quirk);
2616}
2617EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
2618
2619void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2620{
2621	const struct iommu_ops *ops = dev_iommu_ops(dev);
2622
2623	if (ops->get_resv_regions)
2624		ops->get_resv_regions(dev, list);
2625}
2626
2627/**
2628 * iommu_put_resv_regions - release reserved regions
2629 * @dev: device for which to free reserved regions
2630 * @list: reserved region list for device
2631 *
2632 * This releases a reserved region list acquired by iommu_get_resv_regions().
2633 */
2634void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2635{
2636	struct iommu_resv_region *entry, *next;
2637
2638	list_for_each_entry_safe(entry, next, list, list) {
2639		if (entry->free)
2640			entry->free(dev, entry);
2641		else
2642			kfree(entry);
2643	}
2644}
2645EXPORT_SYMBOL(iommu_put_resv_regions);
2646
2647struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2648						  size_t length, int prot,
2649						  enum iommu_resv_type type,
2650						  gfp_t gfp)
2651{
2652	struct iommu_resv_region *region;
2653
2654	region = kzalloc(sizeof(*region), gfp);
2655	if (!region)
2656		return NULL;
2657
2658	INIT_LIST_HEAD(&region->list);
2659	region->start = start;
2660	region->length = length;
2661	region->prot = prot;
2662	region->type = type;
2663	return region;
2664}
2665EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
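
/*
 * Illustrative sketch (added example, not upstream code): an IOMMU
 * driver's get_resv_regions() callback reserving a software MSI window.
 * The base address and size here are placeholders.
 */
static void my_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(0x08000000, SZ_1M,
					 IOMMU_WRITE | IOMMU_MMIO,
					 IOMMU_RESV_SW_MSI, GFP_KERNEL);
	if (region)
		list_add_tail(&region->list, head);
}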
2666
2667void iommu_set_default_passthrough(bool cmd_line)
2668{
2669	if (cmd_line)
2670		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2671	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2672}
2673
2674void iommu_set_default_translated(bool cmd_line)
2675{
2676	if (cmd_line)
2677		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2678	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2679}
2680
2681bool iommu_default_passthrough(void)
2682{
2683	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2684}
2685EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2686
2687const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2688{
2689	const struct iommu_ops *ops = NULL;
2690	struct iommu_device *iommu;
2691
2692	spin_lock(&iommu_device_lock);
2693	list_for_each_entry(iommu, &iommu_device_list, list)
2694		if (iommu->fwnode == fwnode) {
2695			ops = iommu->ops;
2696			break;
2697		}
2698	spin_unlock(&iommu_device_lock);
2699	return ops;
2700}
2701
2702int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2703		      const struct iommu_ops *ops)
2704{
2705	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2706
2707	if (fwspec)
2708		return ops == fwspec->ops ? 0 : -EINVAL;
2709
2710	if (!dev_iommu_get(dev))
2711		return -ENOMEM;
2712
2713	/* Preallocate for the overwhelmingly common case of 1 ID */
2714	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2715	if (!fwspec)
2716		return -ENOMEM;
2717
2718	of_node_get(to_of_node(iommu_fwnode));
2719	fwspec->iommu_fwnode = iommu_fwnode;
2720	fwspec->ops = ops;
2721	dev_iommu_fwspec_set(dev, fwspec);
2722	return 0;
2723}
2724EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2725
2726void iommu_fwspec_free(struct device *dev)
2727{
2728	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2729
2730	if (fwspec) {
2731		fwnode_handle_put(fwspec->iommu_fwnode);
2732		kfree(fwspec);
2733		dev_iommu_fwspec_set(dev, NULL);
2734	}
2735}
2736EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2737
2738int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2739{
2740	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2741	int i, new_num;
2742
2743	if (!fwspec)
2744		return -EINVAL;
2745
2746	new_num = fwspec->num_ids + num_ids;
2747	if (new_num > 1) {
2748		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2749				  GFP_KERNEL);
2750		if (!fwspec)
2751			return -ENOMEM;
2752
2753		dev_iommu_fwspec_set(dev, fwspec);
2754	}
2755
2756	for (i = 0; i < num_ids; i++)
2757		fwspec->ids[fwspec->num_ids + i] = ids[i];
2758
2759	fwspec->num_ids = new_num;
2760	return 0;
2761}
2762EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
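
/*
 * Illustrative sketch (added example, not upstream code): an IOMMU
 * driver's of_xlate() callback usually records the firmware-provided
 * stream ID through this helper; my_iommu_of_xlate is an assumed name.
 */
static int my_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}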
2763
2764/*
2765 * Per device IOMMU features.
2766 */
2767int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2768{
2769	if (dev->iommu && dev->iommu->iommu_dev) {
2770		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2771
2772		if (ops->dev_enable_feat)
2773			return ops->dev_enable_feat(dev, feat);
2774	}
2775
2776	return -ENODEV;
2777}
2778EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2779
2780/*
2781 * Device drivers should do the necessary cleanup before calling this.
2782 */
2783int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2784{
2785	if (dev->iommu && dev->iommu->iommu_dev) {
2786		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2787
2788		if (ops->dev_disable_feat)
2789			return ops->dev_disable_feat(dev, feat);
2790	}
2791
2792	return -EBUSY;
2793}
2794EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2795
2796/*
2797 * Changes the default domain of an iommu group that has *only* one device
2798 *
2799 * @group: The group for which the default domain should be changed
2800 * @prev_dev: The device in the group (this is used to make sure that the device
2801 *	 hasn't changed after the caller has called this function)
2802 * @type: The type of the new default domain that gets associated with the group
2803 *
2804 * Returns 0 on success and error code on failure
2805 *
2806 * Note:
2807 * 1. Presently, this function is called only when user requests to change the
2808 *    group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type
2809 *    Please take a closer look if intended to use for other purposes.
2810 */
2811static int iommu_change_dev_def_domain(struct iommu_group *group,
2812				       struct device *prev_dev, int type)
2813{
2814	struct iommu_domain *prev_dom;
2815	struct group_device *grp_dev;
2816	int ret, dev_def_dom;
2817	struct device *dev;
2818
2819	mutex_lock(&group->mutex);
2820
2821	if (group->default_domain != group->domain) {
2822		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
2823		ret = -EBUSY;
2824		goto out;
2825	}
2826
2827	/*
2828	 * iommu group wasn't locked while acquiring device lock in
2829	 * iommu_group_store_type(). So, make sure that the device count hasn't
2830	 * changed while acquiring device lock.
2831	 *
2832	 * Changing default domain of an iommu group with two or more devices
2833	 * isn't supported because there could be a potential deadlock. Consider
2834	 * the following scenario. T1 is trying to acquire device locks of all
2835	 * the devices in the group and before it could acquire all of them,
2836	 * there could be another thread T2 (from different sub-system and use
2837	 * case) that has already acquired some of the device locks and might be
2838	 * waiting for T1 to release other device locks.
2839	 */
2840	if (iommu_group_device_count(group) != 1) {
2841		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
2842		ret = -EINVAL;
2843		goto out;
2844	}
2845
2846	/* Since group has only one device */
2847	grp_dev = list_first_entry(&group->devices, struct group_device, list);
2848	dev = grp_dev->dev;
2849
2850	if (prev_dev != dev) {
2851		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
2852		ret = -EBUSY;
2853		goto out;
2854	}
2855
2856	prev_dom = group->default_domain;
2857	if (!prev_dom) {
2858		ret = -EINVAL;
2859		goto out;
2860	}
2861
2862	dev_def_dom = iommu_get_def_domain_type(dev);
2863	if (!type) {
2864		/*
2865		 * If the user hasn't requested any specific type of domain and
2866		 * if the device supports both domain types, then default to the
2867		 * domain the device was booted with.
2868		 */
2869		type = dev_def_dom ? : iommu_def_domain_type;
2870	} else if (dev_def_dom && type != dev_def_dom) {
2871		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
2872				    iommu_domain_type_str(type));
2873		ret = -EINVAL;
2874		goto out;
2875	}
2876
2877	/*
2878	 * Switch to a new domain only if the requested domain type is different
2879	 * from the existing default domain type
2880	 */
2881	if (prev_dom->type == type) {
2882		ret = 0;
2883		goto out;
2884	}
2885
2886	/* We can bring up a flush queue without tearing down the domain */
2887	if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
2888		ret = iommu_dma_init_fq(prev_dom);
2889		if (!ret)
2890			prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
2891		goto out;
2892	}
2893
2894	/* Sets group->default_domain to the newly allocated domain */
2895	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
2896	if (ret)
2897		goto out;
2898
2899	ret = iommu_create_device_direct_mappings(group, dev);
2900	if (ret)
2901		goto free_new_domain;
2902
2903	ret = __iommu_attach_device(group->default_domain, dev);
2904	if (ret)
2905		goto free_new_domain;
2906
2907	group->domain = group->default_domain;
2908
2909	/*
2910	 * Release the mutex here because ops->probe_finalize() call-back of
2911	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
2912	 * in-turn might call back into IOMMU core code, where it tries to take
2913	 * group->mutex, resulting in a deadlock.
2914	 */
2915	mutex_unlock(&group->mutex);
2916
2917	/* Make sure dma_ops is appropriately set */
2918	iommu_group_do_probe_finalize(dev, group->default_domain);
2919	iommu_domain_free(prev_dom);
2920	return 0;
2921
2922free_new_domain:
2923	iommu_domain_free(group->default_domain);
2924	group->default_domain = prev_dom;
2925	group->domain = prev_dom;
2926
2927out:
2928	mutex_unlock(&group->mutex);
2929
2930	return ret;
2931}
2932
2933/*
2934 * Changing the default domain through sysfs requires the users to unbind the
2935 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
2936 * transition. Return failure if this isn't met.
2937 *
2938 * We need to consider the race between this and the device release path.
2939 * device_lock(dev) is used here to guarantee that the device release path
2940 * will not be entered at the same time.
2941 */
2942static ssize_t iommu_group_store_type(struct iommu_group *group,
2943				      const char *buf, size_t count)
2944{
2945	struct group_device *grp_dev;
2946	struct device *dev;
2947	int ret, req_type;
2948
2949	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2950		return -EACCES;
2951
2952	if (WARN_ON(!group) || !group->default_domain)
2953		return -EINVAL;
2954
2955	if (sysfs_streq(buf, "identity"))
2956		req_type = IOMMU_DOMAIN_IDENTITY;
2957	else if (sysfs_streq(buf, "DMA"))
2958		req_type = IOMMU_DOMAIN_DMA;
2959	else if (sysfs_streq(buf, "DMA-FQ"))
2960		req_type = IOMMU_DOMAIN_DMA_FQ;
2961	else if (sysfs_streq(buf, "auto"))
2962		req_type = 0;
2963	else
2964		return -EINVAL;
2965
2966	/*
2967	 * Lock/Unlock the group mutex here before device lock to
2968	 * 1. Make sure that the iommu group has only one device (this is a
2969	 *    prerequisite for step 2)
2970	 * 2. Get struct *dev which is needed to lock device
2971	 */
2972	mutex_lock(&group->mutex);
2973	if (iommu_group_device_count(group) != 1) {
2974		mutex_unlock(&group->mutex);
2975		pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
2976		return -EINVAL;
2977	}
2978
2979	/* Since group has only one device */
2980	grp_dev = list_first_entry(&group->devices, struct group_device, list);
2981	dev = grp_dev->dev;
2982	get_device(dev);
2983
2984	/*
2985	 * Don't hold the group mutex because taking group mutex first and then
2986	 * the device lock could potentially cause a deadlock as below. Assume
2987	 * two threads T1 and T2. T1 is trying to change default domain of an
2988	 * iommu group and T2 is trying to hot unplug a device or release [1] VF
2989	 * of a PCIe device which is in the same iommu group. T1 takes group
2990	 * mutex and before it could take device lock assume T2 has taken device
2991	 * lock and is yet to take group mutex. Now, both threads will be
2992	 * waiting for the other thread to release the lock. The lock order
2993	 * suggested below avoids this:
2994	 * device_lock(dev);
2995	 *	mutex_lock(&group->mutex);
2996	 *		iommu_change_dev_def_domain();
2997	 *	mutex_unlock(&group->mutex);
2998	 * device_unlock(dev);
2999	 *
3000	 * [1] Typical device release path
3001	 * device_lock() from device/driver core code
3002	 *  -> bus_notifier()
3003	 *   -> iommu_bus_notifier()
3004	 *    -> iommu_release_device()
3005	 *     -> ops->release_device() vendor driver calls back iommu core code
3006	 *      -> mutex_lock() from iommu core code
3007	 */
3008	mutex_unlock(&group->mutex);
3009
3010	/* Check if the device in the group still has a driver bound to it */
3011	device_lock(dev);
3012	if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
3013	    group->default_domain->type == IOMMU_DOMAIN_DMA)) {
3014		pr_err_ratelimited("Device is still bound to driver\n");
3015		ret = -EBUSY;
3016		goto out;
3017	}
3018
3019	ret = iommu_change_dev_def_domain(group, dev, req_type);
3020	ret = ret ?: count;
3021
3022out:
3023	device_unlock(dev);
3024	put_device(dev);
3025
3026	return ret;
3027}
3028
3029static bool iommu_is_default_domain(struct iommu_group *group)
3030{
3031	if (group->domain == group->default_domain)
3032		return true;
3033
3034	/*
3035	 * If the default domain was set to identity and it is still an identity
3036	 * domain then we consider this a pass. This happens because of
3037	 * amd_iommu_init_device() replacing the default identity domain with an
3038	 * identity domain that has a different configuration for AMDGPU.
3039	 */
3040	if (group->default_domain &&
3041	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
3042	    group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
3043		return true;
3044	return false;
3045}
3046
3047/**
3048 * iommu_device_use_default_domain() - Device driver wants to handle device
3049 *                                     DMA through the kernel DMA API.
3050 * @dev: The device.
3051 *
3052 * The device driver about to bind @dev wants to do DMA through the kernel
3053 * DMA API. Return 0 if it is allowed, otherwise an error.
3054 */
3055int iommu_device_use_default_domain(struct device *dev)
3056{
3057	struct iommu_group *group = iommu_group_get(dev);
3058	int ret = 0;
3059
3060	if (!group)
3061		return 0;
3062
3063	mutex_lock(&group->mutex);
3064	if (group->owner_cnt) {
3065		if (group->owner || !iommu_is_default_domain(group) ||
3066		    !xa_empty(&group->pasid_array)) {
3067			ret = -EBUSY;
3068			goto unlock_out;
3069		}
3070	}
3071
3072	group->owner_cnt++;
3073
3074unlock_out:
3075	mutex_unlock(&group->mutex);
3076	iommu_group_put(group);
3077
3078	return ret;
3079}
3080
3081/**
3082 * iommu_device_unuse_default_domain() - Device driver stops handling device
3083 *                                       DMA through the kernel DMA API.
3084 * @dev: The device.
3085 *
3086 * The device driver doesn't want to do DMA through kernel DMA API anymore.
3087 * It must be called after iommu_device_use_default_domain().
3088 */
3089void iommu_device_unuse_default_domain(struct device *dev)
3090{
3091	struct iommu_group *group = iommu_group_get(dev);
3092
3093	if (!group)
3094		return;
3095
3096	mutex_lock(&group->mutex);
3097	if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
3098		group->owner_cnt--;
3099
3100	mutex_unlock(&group->mutex);
3101	iommu_group_put(group);
3102}
3103
3104static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
3105{
3106	struct group_device *dev =
3107		list_first_entry(&group->devices, struct group_device, list);
3108
3109	if (group->blocking_domain)
3110		return 0;
3111
3112	group->blocking_domain =
3113		__iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
3114	if (!group->blocking_domain) {
3115		/*
3116		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED,
3117		 * create an empty domain instead.
3118		 */
3119		group->blocking_domain = __iommu_domain_alloc(
3120			dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
3121		if (!group->blocking_domain)
3122			return -EINVAL;
3123	}
3124	return 0;
3125}
3126
3127static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner)
3128{
3129	int ret;
3130
3131	if ((group->domain && group->domain != group->default_domain) ||
3132	    !xa_empty(&group->pasid_array))
3133		return -EBUSY;
3134
3135	ret = __iommu_group_alloc_blocking_domain(group);
3136	if (ret)
3137		return ret;
3138	ret = __iommu_group_set_domain(group, group->blocking_domain);
3139	if (ret)
3140		return ret;
3141
3142	group->owner = owner;
3143	group->owner_cnt++;
3144	return 0;
3145}
3146
3147/**
3148 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3149 * @group: The group.
3150 * @owner: Caller specified pointer. Used for exclusive ownership.
3151 *
3152 * This exists to support backward compatibility for vfio, which manages DMA
3153 * ownership at the iommu_group level. New invocations of this interface
3154 * should be avoided. Only a single owner may exist for a group.
3155 */
3156int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
3157{
3158	int ret = 0;
3159
3160	if (WARN_ON(!owner))
3161		return -EINVAL;
3162
3163	mutex_lock(&group->mutex);
3164	if (group->owner_cnt) {
3165		ret = -EPERM;
3166		goto unlock_out;
3167	}
3168
3169	ret = __iommu_take_dma_ownership(group, owner);
3170unlock_out:
3171	mutex_unlock(&group->mutex);
3172
3173	return ret;
3174}
3175EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
3176
3177/**
3178 * iommu_device_claim_dma_owner() - Set DMA ownership of a device
3179 * @dev: The device.
3180 * @owner: Caller specified pointer. Used for exclusive ownership.
3181 *
3182 * Claim the DMA ownership of a device. Multiple devices in the same group may
3183 * concurrently claim ownership if they present the same owner value. Returns 0
3184 * on success and error code on failure
3185 */
3186int iommu_device_claim_dma_owner(struct device *dev, void *owner)
3187{
3188	struct iommu_group *group;
3189	int ret = 0;
3190
3191	if (WARN_ON(!owner))
3192		return -EINVAL;
3193
3194	group = iommu_group_get(dev);
3195	if (!group)
3196		return -ENODEV;
3197
3198	mutex_lock(&group->mutex);
3199	if (group->owner_cnt) {
3200		if (group->owner != owner) {
3201			ret = -EPERM;
3202			goto unlock_out;
3203		}
3204		group->owner_cnt++;
3205		goto unlock_out;
3206	}
3207
3208	ret = __iommu_take_dma_ownership(group, owner);
3209unlock_out:
3210	mutex_unlock(&group->mutex);
3211	iommu_group_put(group);
3212
3213	return ret;
3214}
3215EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);
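
/*
 * Illustrative sketch (added example, not upstream code): a VFIO-like
 * framework bracketing direct device assignment with a DMA ownership
 * claim. @owner_cookie is an assumed opaque token.
 */
static int my_assign_device(struct device *dev, void *owner_cookie)
{
	int ret;

	ret = iommu_device_claim_dma_owner(dev, owner_cookie);
	if (ret)
		return ret;	/* the group's DMA is owned by someone else */

	/* ... attach an unmanaged domain and run user-controlled DMA ... */

	iommu_device_release_dma_owner(dev);
	return 0;
}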
3216
3217static void __iommu_release_dma_ownership(struct iommu_group *group)
3218{
3219	int ret;
3220
3221	if (WARN_ON(!group->owner_cnt || !group->owner ||
3222		    !xa_empty(&group->pasid_array)))
3223		return;
3224
3225	group->owner_cnt = 0;
3226	group->owner = NULL;
3227	ret = __iommu_group_set_domain(group, group->default_domain);
3228	WARN(ret, "iommu driver failed to attach the default domain");
3229}
3230
3231/**
3232 * iommu_group_release_dma_owner() - Release DMA ownership of a group
3233 * @group: The group.
3234 *
3235 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
3236 */
3237void iommu_group_release_dma_owner(struct iommu_group *group)
3238{
3239	mutex_lock(&group->mutex);
3240	__iommu_release_dma_ownership(group);
3241	mutex_unlock(&group->mutex);
3242}
3243EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
3244
3245/**
3246 * iommu_device_release_dma_owner() - Release DMA ownership of a device
3247 * @dev: The device.
3248 *
3249 * Release the DMA ownership claimed by iommu_device_claim_dma_owner().
3250 */
3251void iommu_device_release_dma_owner(struct device *dev)
3252{
3253	struct iommu_group *group = iommu_group_get(dev);
3254
3255	mutex_lock(&group->mutex);
3256	if (group->owner_cnt > 1)
3257		group->owner_cnt--;
3258	else
3259		__iommu_release_dma_ownership(group);
3260	mutex_unlock(&group->mutex);
3261	iommu_group_put(group);
3262}
3263EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner);
3264
3265/**
3266 * iommu_group_dma_owner_claimed() - Query group dma ownership status
3267 * @group: The group.
3268 *
3269 * This provides a status query on a given group. It is racy and only for
3270 * non-binding status reporting.
3271 */
3272bool iommu_group_dma_owner_claimed(struct iommu_group *group)
3273{
3274	unsigned int user;
3275
3276	mutex_lock(&group->mutex);
3277	user = group->owner_cnt;
3278	mutex_unlock(&group->mutex);
3279
3280	return user;
3281}
3282EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
3283
3284static int __iommu_set_group_pasid(struct iommu_domain *domain,
3285				   struct iommu_group *group, ioasid_t pasid)
3286{
3287	struct group_device *device;
3288	int ret = 0;
3289
3290	list_for_each_entry(device, &group->devices, list) {
3291		ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
3292		if (ret)
3293			break;
3294	}
3295
3296	return ret;
3297}
3298
3299static void __iommu_remove_group_pasid(struct iommu_group *group,
3300				       ioasid_t pasid)
3301{
3302	struct group_device *device;
3303	const struct iommu_ops *ops;
3304
3305	list_for_each_entry(device, &group->devices, list) {
3306		ops = dev_iommu_ops(device->dev);
3307		ops->remove_dev_pasid(device->dev, pasid);
3308	}
3309}
3310
3311/**
3312 * iommu_attach_device_pasid() - Attach a domain to pasid of device
3313 * @domain: the iommu domain.
3314 * @dev: the attached device.
3315 * @pasid: the pasid of the device.
3316 *
3317 * Return: 0 on success, or an error.
3318 */
3319int iommu_attach_device_pasid(struct iommu_domain *domain,
3320			      struct device *dev, ioasid_t pasid)
3321{
3322	struct iommu_group *group;
3323	void *curr;
3324	int ret;
3325
3326	if (!domain->ops->set_dev_pasid)
3327		return -EOPNOTSUPP;
3328
3329	group = iommu_group_get(dev);
3330	if (!group)
3331		return -ENODEV;
3332
3333	mutex_lock(&group->mutex);
3334	curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
3335	if (curr) {
3336		ret = xa_err(curr) ? : -EBUSY;
3337		goto out_unlock;
3338	}
3339
3340	ret = __iommu_set_group_pasid(domain, group, pasid);
3341	if (ret) {
3342		__iommu_remove_group_pasid(group, pasid);
3343		xa_erase(&group->pasid_array, pasid);
3344	}
3345out_unlock:
3346	mutex_unlock(&group->mutex);
3347	iommu_group_put(group);
3348
3349	return ret;
3350}
3351EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
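
/*
 * Illustrative sketch (added example, not upstream code): binding an SVA
 * domain to a PASID. The pasid value is assumed to come from the
 * caller's PASID allocator.
 */
static int my_bind_pasid(struct device *dev, struct mm_struct *mm,
			 ioasid_t pasid)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret)
		iommu_domain_free(domain);
	return ret;
}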
3352
3353/**
3354 * iommu_detach_device_pasid() - Detach the domain from pasid of device
3355 * @domain: the iommu domain.
3356 * @dev: the attached device.
3357 * @pasid: the pasid of the device.
3358 *
3359 * The @domain must have been attached to @pasid of the @dev with
3360 * iommu_attach_device_pasid().
3361 */
3362void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
3363			       ioasid_t pasid)
3364{
3365	struct iommu_group *group = iommu_group_get(dev);
3366
3367	mutex_lock(&group->mutex);
3368	__iommu_remove_group_pasid(group, pasid);
3369	WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
3370	mutex_unlock(&group->mutex);
3371
3372	iommu_group_put(group);
3373}
3374EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
3375
3376/**
3377 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev
3378 * @dev: the queried device
3379 * @pasid: the pasid of the device
3380 * @type: matched domain type, 0 for any match
3381 *
3382 * This is a variant of iommu_get_domain_for_dev(). It returns the existing
3383 * domain attached to pasid of a device. Callers must hold a lock around this
3384 * function, and both iommu_attach/detach_dev_pasid(), whenever a domain of
3385 * this type is being manipulated. This API does not internally resolve races with
3386 * attach/detach.
3387 *
3388 * Return: attached domain on success, NULL otherwise.
3389 */
3390struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
3391						    ioasid_t pasid,
3392						    unsigned int type)
3393{
3394	struct iommu_domain *domain;
3395	struct iommu_group *group;
3396
3397	group = iommu_group_get(dev);
3398	if (!group)
3399		return NULL;
3400
3401	xa_lock(&group->pasid_array);
3402	domain = xa_load(&group->pasid_array, pasid);
3403	if (type && domain && domain->type != type)
3404		domain = ERR_PTR(-EBUSY);
3405	xa_unlock(&group->pasid_array);
3406	iommu_group_put(group);
3407
3408	return domain;
3409}
3410EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
3411
3412struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
3413					    struct mm_struct *mm)
3414{
3415	const struct iommu_ops *ops = dev_iommu_ops(dev);
3416	struct iommu_domain *domain;
3417
3418	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
3419	if (!domain)
3420		return NULL;
3421
3422	domain->type = IOMMU_DOMAIN_SVA;
3423	mmgrab(mm);
3424	domain->mm = mm;
3425	domain->iopf_handler = iommu_sva_handle_iopf;
3426	domain->fault_data = mm;
3427
3428	return domain;
3429}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
   4 * Author: Joerg Roedel <jroedel@suse.de>
   5 */
   6
   7#define pr_fmt(fmt)    "iommu: " fmt
   8
   9#include <linux/amba/bus.h>
  10#include <linux/device.h>
  11#include <linux/kernel.h>
  12#include <linux/bits.h>
  13#include <linux/bug.h>
  14#include <linux/types.h>
  15#include <linux/init.h>
  16#include <linux/export.h>
  17#include <linux/slab.h>
  18#include <linux/errno.h>
  19#include <linux/host1x_context_bus.h>
  20#include <linux/iommu.h>
  21#include <linux/idr.h>
  22#include <linux/err.h>
  23#include <linux/pci.h>
  24#include <linux/pci-ats.h>
  25#include <linux/bitops.h>
  26#include <linux/platform_device.h>
  27#include <linux/property.h>
  28#include <linux/fsl/mc.h>
  29#include <linux/module.h>
  30#include <linux/cc_platform.h>
  31#include <linux/cdx/cdx_bus.h>
  32#include <trace/events/iommu.h>
  33#include <linux/sched/mm.h>
  34#include <linux/msi.h>
  35#include <uapi/linux/iommufd.h>
  36
  37#include "dma-iommu.h"
  38#include "iommu-priv.h"
 
  39
  40static struct kset *iommu_group_kset;
  41static DEFINE_IDA(iommu_group_ida);
  42static DEFINE_IDA(iommu_global_pasid_ida);
  43
  44static unsigned int iommu_def_domain_type __read_mostly;
  45static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
  46static u32 iommu_cmd_line __read_mostly;
  47
  48struct iommu_group {
  49	struct kobject kobj;
  50	struct kobject *devices_kobj;
  51	struct list_head devices;
  52	struct xarray pasid_array;
  53	struct mutex mutex;
  54	void *iommu_data;
  55	void (*iommu_data_release)(void *iommu_data);
  56	char *name;
  57	int id;
  58	struct iommu_domain *default_domain;
  59	struct iommu_domain *blocking_domain;
  60	struct iommu_domain *domain;
  61	struct list_head entry;
  62	unsigned int owner_cnt;
  63	void *owner;
  64};
  65
  66struct group_device {
  67	struct list_head list;
  68	struct device *dev;
  69	char *name;
  70};
  71
  72/* Iterate over each struct group_device in a struct iommu_group */
  73#define for_each_group_device(group, pos) \
  74	list_for_each_entry(pos, &(group)->devices, list)
  75
  76struct iommu_group_attribute {
  77	struct attribute attr;
  78	ssize_t (*show)(struct iommu_group *group, char *buf);
  79	ssize_t (*store)(struct iommu_group *group,
  80			 const char *buf, size_t count);
  81};
  82
  83static const char * const iommu_group_resv_type_string[] = {
  84	[IOMMU_RESV_DIRECT]			= "direct",
  85	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
  86	[IOMMU_RESV_RESERVED]			= "reserved",
  87	[IOMMU_RESV_MSI]			= "msi",
  88	[IOMMU_RESV_SW_MSI]			= "msi",
  89};
  90
  91#define IOMMU_CMD_LINE_DMA_API		BIT(0)
  92#define IOMMU_CMD_LINE_STRICT		BIT(1)
  93
  94static int bus_iommu_probe(const struct bus_type *bus);
  95static int iommu_bus_notifier(struct notifier_block *nb,
  96			      unsigned long action, void *data);
  97static void iommu_release_device(struct device *dev);
 
 
 
  98static int __iommu_attach_device(struct iommu_domain *domain,
  99				 struct device *dev);
 100static int __iommu_attach_group(struct iommu_domain *domain,
 101				struct iommu_group *group);
 102static struct iommu_domain *__iommu_paging_domain_alloc_flags(struct device *dev,
 103						       unsigned int type,
 104						       unsigned int flags);
 105
 106enum {
 107	IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
 108};
 109
 110static int __iommu_device_set_domain(struct iommu_group *group,
 111				     struct device *dev,
 112				     struct iommu_domain *new_domain,
 113				     unsigned int flags);
 114static int __iommu_group_set_domain_internal(struct iommu_group *group,
 115					     struct iommu_domain *new_domain,
 116					     unsigned int flags);
 117static int __iommu_group_set_domain(struct iommu_group *group,
 118				    struct iommu_domain *new_domain)
 119{
 120	return __iommu_group_set_domain_internal(group, new_domain, 0);
 121}
 122static void __iommu_group_set_domain_nofail(struct iommu_group *group,
 123					    struct iommu_domain *new_domain)
 124{
 125	WARN_ON(__iommu_group_set_domain_internal(
 126		group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED));
 127}
 128
 129static int iommu_setup_default_domain(struct iommu_group *group,
 130				      int target_type);
 131static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
 132					       struct device *dev);
 
 133static ssize_t iommu_group_store_type(struct iommu_group *group,
 134				      const char *buf, size_t count);
 135static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
 136						     struct device *dev);
 137static void __iommu_group_free_device(struct iommu_group *group,
 138				      struct group_device *grp_dev);
 139static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
 140			      const struct iommu_ops *ops);
 141
 142#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
 143struct iommu_group_attribute iommu_group_attr_##_name =		\
 144	__ATTR(_name, _mode, _show, _store)
 145
 146#define to_iommu_group_attr(_attr)	\
 147	container_of(_attr, struct iommu_group_attribute, attr)
 148#define to_iommu_group(_kobj)		\
 149	container_of(_kobj, struct iommu_group, kobj)
 150
 151static LIST_HEAD(iommu_device_list);
 152static DEFINE_SPINLOCK(iommu_device_lock);
 153
 154static const struct bus_type * const iommu_buses[] = {
 155	&platform_bus_type,
 156#ifdef CONFIG_PCI
 157	&pci_bus_type,
 158#endif
 159#ifdef CONFIG_ARM_AMBA
 160	&amba_bustype,
 161#endif
 162#ifdef CONFIG_FSL_MC_BUS
 163	&fsl_mc_bus_type,
 164#endif
 165#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
 166	&host1x_context_device_bus_type,
 167#endif
 168#ifdef CONFIG_CDX_BUS
 169	&cdx_bus_type,
 170#endif
 171};
 172
 173/*
 174 * Use a function instead of an array here because the domain-type is a
 175 * bit-field, so an array would waste memory.
 176 */
 177static const char *iommu_domain_type_str(unsigned int t)
 178{
 179	switch (t) {
 180	case IOMMU_DOMAIN_BLOCKED:
 181		return "Blocked";
 182	case IOMMU_DOMAIN_IDENTITY:
 183		return "Passthrough";
 184	case IOMMU_DOMAIN_UNMANAGED:
 185		return "Unmanaged";
 186	case IOMMU_DOMAIN_DMA:
 187	case IOMMU_DOMAIN_DMA_FQ:
 188		return "Translated";
 189	case IOMMU_DOMAIN_PLATFORM:
 190		return "Platform";
 191	default:
 192		return "Unknown";
 193	}
 194}
 195
 196static int __init iommu_subsys_init(void)
 197{
 198	struct notifier_block *nb;
 199
 200	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
 201		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
 202			iommu_set_default_passthrough(false);
 203		else
 204			iommu_set_default_translated(false);
 205
 206		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 207			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
 208			iommu_set_default_translated(false);
 209		}
 210	}
 211
 212	if (!iommu_default_passthrough() && !iommu_dma_strict)
 213		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;
 214
 215	pr_info("Default domain type: %s%s\n",
 216		iommu_domain_type_str(iommu_def_domain_type),
 217		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
 218			" (set via kernel command line)" : "");
 219
 220	if (!iommu_default_passthrough())
 221		pr_info("DMA domain TLB invalidation policy: %s mode%s\n",
 222			iommu_dma_strict ? "strict" : "lazy",
 223			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
 224				" (set via kernel command line)" : "");
 225
 226	nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
 227	if (!nb)
 228		return -ENOMEM;
 229
 230	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
 231		nb[i].notifier_call = iommu_bus_notifier;
 232		bus_register_notifier(iommu_buses[i], &nb[i]);
 233	}
 234
 235	return 0;
 236}
 237subsys_initcall(iommu_subsys_init);
 238
 239static int remove_iommu_group(struct device *dev, void *data)
 240{
 241	if (dev->iommu && dev->iommu->iommu_dev == data)
 242		iommu_release_device(dev);
 243
 244	return 0;
 245}
 246
 247/**
 248 * iommu_device_register() - Register an IOMMU hardware instance
 249 * @iommu: IOMMU handle for the instance
 250 * @ops:   IOMMU ops to associate with the instance
 251 * @hwdev: (optional) actual instance device, used for fwnode lookup
 252 *
 253 * Return: 0 on success, or an error.
 254 */
 255int iommu_device_register(struct iommu_device *iommu,
 256			  const struct iommu_ops *ops, struct device *hwdev)
 257{
 258	int err = 0;
 259
 260	/* We need to be able to take module references appropriately */
 261	if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
 262		return -EINVAL;
 
 
 
 
 
 
 
 263
 264	iommu->ops = ops;
 265	if (hwdev)
 266		iommu->fwnode = dev_fwnode(hwdev);
 267
 268	spin_lock(&iommu_device_lock);
 269	list_add_tail(&iommu->list, &iommu_device_list);
 270	spin_unlock(&iommu_device_lock);
 271
 272	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++)
 
 273		err = bus_iommu_probe(iommu_buses[i]);
 
 274	if (err)
 275		iommu_device_unregister(iommu);
 276	return err;
 277}
 278EXPORT_SYMBOL_GPL(iommu_device_register);
 279
 280void iommu_device_unregister(struct iommu_device *iommu)
 281{
 282	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
 283		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);
 284
 285	spin_lock(&iommu_device_lock);
 286	list_del(&iommu->list);
 287	spin_unlock(&iommu_device_lock);
 288
 289	/* Pairs with the alloc in generic_single_device_group() */
 290	iommu_group_put(iommu->singleton_group);
 291	iommu->singleton_group = NULL;
 292}
 293EXPORT_SYMBOL_GPL(iommu_device_unregister);
 294
 295#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
 296void iommu_device_unregister_bus(struct iommu_device *iommu,
 297				 const struct bus_type *bus,
 298				 struct notifier_block *nb)
 299{
 300	bus_unregister_notifier(bus, nb);
 301	iommu_device_unregister(iommu);
 302}
 303EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
 304
 305/*
 306 * Register an iommu driver against a single bus. This is only used by iommufd
 307 * selftest to create a mock iommu driver. The caller must provide
 308 * some memory to hold a notifier_block.
 309 */
 310int iommu_device_register_bus(struct iommu_device *iommu,
 311			      const struct iommu_ops *ops,
 312			      const struct bus_type *bus,
 313			      struct notifier_block *nb)
 314{
 315	int err;
 316
 317	iommu->ops = ops;
 318	nb->notifier_call = iommu_bus_notifier;
 319	err = bus_register_notifier(bus, nb);
 320	if (err)
 321		return err;
 322
 323	spin_lock(&iommu_device_lock);
 324	list_add_tail(&iommu->list, &iommu_device_list);
 325	spin_unlock(&iommu_device_lock);
 326
 327	err = bus_iommu_probe(bus);
 328	if (err) {
 329		iommu_device_unregister_bus(iommu, bus, nb);
 330		return err;
 331	}
 332	return 0;
 333}
 334EXPORT_SYMBOL_GPL(iommu_device_register_bus);
 335#endif
 336
 337static struct dev_iommu *dev_iommu_get(struct device *dev)
 338{
 339	struct dev_iommu *param = dev->iommu;
 340
 341	lockdep_assert_held(&iommu_probe_device_lock);
 342
 343	if (param)
 344		return param;
 345
 346	param = kzalloc(sizeof(*param), GFP_KERNEL);
 347	if (!param)
 348		return NULL;
 349
 350	mutex_init(&param->lock);
 351	dev->iommu = param;
 352	return param;
 353}
 354
 355static void dev_iommu_free(struct device *dev)
 356{
 357	struct dev_iommu *param = dev->iommu;
 358
 359	dev->iommu = NULL;
 360	if (param->fwspec) {
 361		fwnode_handle_put(param->fwspec->iommu_fwnode);
 362		kfree(param->fwspec);
 363	}
 364	kfree(param);
 365}
 366
 367/*
 368 * Internal equivalent of device_iommu_mapped() for when we care that a device
 369 * actually has API ops, and don't want false positives from VFIO-only groups.
 370 */
 371static bool dev_has_iommu(struct device *dev)
 372{
 373	return dev->iommu && dev->iommu->iommu_dev;
 374}
 375
 376static u32 dev_iommu_get_max_pasids(struct device *dev)
 377{
 378	u32 max_pasids = 0, bits = 0;
 379	int ret;
 380
 381	if (dev_is_pci(dev)) {
 382		ret = pci_max_pasids(to_pci_dev(dev));
 383		if (ret > 0)
 384			max_pasids = ret;
 385	} else {
 386		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
 387		if (!ret)
 388			max_pasids = 1UL << bits;
 389	}
 390
 391	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
 392}
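/*
 * Worked example (illustrative): a non-PCI device whose firmware supplies
 * "pasid-num-bits" = 5 yields max_pasids = 1 << 5 = 32; if the IOMMU
 * instance itself only advertises max_pasids = 16, the result is clamped
 * to min(32, 16) = 16. A device with neither the PCI PASID capability nor
 * the property ends up with 0.
 */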
 393
 394void dev_iommu_priv_set(struct device *dev, void *priv)
 395{
 396	/* FSL_PAMU does something weird */
 397	if (!IS_ENABLED(CONFIG_FSL_PAMU))
 398		lockdep_assert_held(&iommu_probe_device_lock);
 399	dev->iommu->priv = priv;
 400}
 401EXPORT_SYMBOL_GPL(dev_iommu_priv_set);
 402
 403/*
 404 * Init the dev->iommu and dev->iommu_group in the struct device and get the
 405 * driver probed
 406 */
 407static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
 408{
 409	struct iommu_device *iommu_dev;
 410	struct iommu_group *group;
 411	int ret;
 412
 413	if (!dev_iommu_get(dev))
 414		return -ENOMEM;
 415
 416	if (!try_module_get(ops->owner)) {
 417		ret = -EINVAL;
 418		goto err_free;
 419	}
 420
 421	iommu_dev = ops->probe_device(dev);
 422	if (IS_ERR(iommu_dev)) {
 423		ret = PTR_ERR(iommu_dev);
 424		goto err_module_put;
 425	}
 426	dev->iommu->iommu_dev = iommu_dev;
 427
 428	ret = iommu_device_link(iommu_dev, dev);
 429	if (ret)
 430		goto err_release;
 431
 432	group = ops->device_group(dev);
 433	if (WARN_ON_ONCE(group == NULL))
 434		group = ERR_PTR(-EINVAL);
 435	if (IS_ERR(group)) {
 436		ret = PTR_ERR(group);
 437		goto err_unlink;
 438	}
 439	dev->iommu_group = group;
 440
 441	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
 442	if (ops->is_attach_deferred)
 443		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
 444	return 0;
 445
 446err_unlink:
 447	iommu_device_unlink(iommu_dev, dev);
 448err_release:
 449	if (ops->release_device)
 450		ops->release_device(dev);
 451err_module_put:
 452	module_put(ops->owner);
 453err_free:
 454	dev->iommu->iommu_dev = NULL;
 455	dev_iommu_free(dev);
 456	return ret;
 457}
 458
 459static void iommu_deinit_device(struct device *dev)
 460{
 461	struct iommu_group *group = dev->iommu_group;
 462	const struct iommu_ops *ops = dev_iommu_ops(dev);
 463
 464	lockdep_assert_held(&group->mutex);
 465
 466	iommu_device_unlink(dev->iommu->iommu_dev, dev);
 467
 468	/*
 469	 * release_device() must stop using any attached domain on the device.
 470	 * If there are still other devices in the group, they are not affected
 471	 * by this callback.
 472	 *
 473	 * If the iommu driver provides release_domain, the core code ensures
 474	 * that domain is attached prior to calling release_device. Drivers can
 475	 * use this to enforce a translation on the idle iommu. Typically, the
 476	 * global static blocked_domain is a good choice.
 477	 *
 478	 * Otherwise, the iommu driver must set the device to either an identity
 479	 * or a blocking translation in release_device() and stop using any
 480	 * domain pointer, as it is going to be freed.
 481	 *
  482	 * Regardless, if a delayed attach never occurred, the release
  483	 * should not touch any hardware configuration either.
 484	 */
 485	if (!dev->iommu->attach_deferred && ops->release_domain)
 486		ops->release_domain->ops->attach_dev(ops->release_domain, dev);
 487
 488	if (ops->release_device)
 489		ops->release_device(dev);
 490
 491	/*
 492	 * If this is the last driver to use the group then we must free the
 493	 * domains before we do the module_put().
 494	 */
 495	if (list_empty(&group->devices)) {
 496		if (group->default_domain) {
 497			iommu_domain_free(group->default_domain);
 498			group->default_domain = NULL;
 499		}
 500		if (group->blocking_domain) {
 501			iommu_domain_free(group->blocking_domain);
 502			group->blocking_domain = NULL;
 503		}
 504		group->domain = NULL;
 505	}
 506
 507	/* Caller must put iommu_group */
 508	dev->iommu_group = NULL;
 509	module_put(ops->owner);
 510	dev_iommu_free(dev);
 511}
 512
 513DEFINE_MUTEX(iommu_probe_device_lock);
 514
 515static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
 516{
 517	const struct iommu_ops *ops;
 518	struct iommu_group *group;
 519	struct group_device *gdev;
 520	int ret;
 521
 522	/*
 523	 * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU
 524	 * instances with non-NULL fwnodes, and client devices should have been
 525	 * identified with a fwspec by this point. Otherwise, we can currently
 526	 * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
 527	 * be present, and that any of their registered instances has suitable
 528	 * ops for probing, and thus cheekily co-opt the same mechanism.
 529	 */
 530	ops = iommu_fwspec_ops(dev_iommu_fwspec_get(dev));
 531	if (!ops)
 532		return -ENODEV;
 533	/*
 534	 * Serialise to avoid races between IOMMU drivers registering in
 535	 * parallel and/or the "replay" calls from ACPI/OF code via client
 536	 * driver probe. Once the latter have been cleaned up we should
 537	 * probably be able to use device_lock() here to minimise the scope,
 538	 * but for now enforcing a simple global ordering is fine.
 539	 */
 540	lockdep_assert_held(&iommu_probe_device_lock);
 541
 542	/* Device is probed already if in a group */
 543	if (dev->iommu_group)
 544		return 0;
 545
 546	ret = iommu_init_device(dev, ops);
 547	if (ret)
 548		return ret;
 549
 550	group = dev->iommu_group;
 551	gdev = iommu_group_alloc_device(group, dev);
 552	mutex_lock(&group->mutex);
 553	if (IS_ERR(gdev)) {
 554		ret = PTR_ERR(gdev);
 555		goto err_put_group;
 556	}
 557
 558	/*
 559	 * The gdev must be in the list before calling
 560	 * iommu_setup_default_domain()
 561	 */
 562	list_add_tail(&gdev->list, &group->devices);
 563	WARN_ON(group->default_domain && !group->domain);
 564	if (group->default_domain)
 565		iommu_create_device_direct_mappings(group->default_domain, dev);
 566	if (group->domain) {
 567		ret = __iommu_device_set_domain(group, dev, group->domain, 0);
 568		if (ret)
 569			goto err_remove_gdev;
 570	} else if (!group->default_domain && !group_list) {
 571		ret = iommu_setup_default_domain(group, 0);
 572		if (ret)
 573			goto err_remove_gdev;
 574	} else if (!group->default_domain) {
 575		/*
 576		 * With a group_list argument we defer the default_domain setup
 577		 * to the caller by providing a de-duplicated list of groups
 578		 * that need further setup.
 579		 */
 580		if (list_empty(&group->entry))
 581			list_add_tail(&group->entry, group_list);
 582	}
 583
 584	if (group->default_domain)
 585		iommu_setup_dma_ops(dev);
 586
 587	mutex_unlock(&group->mutex);
 588
 589	return 0;
 590
 591err_remove_gdev:
 592	list_del(&gdev->list);
 593	__iommu_group_free_device(group, gdev);
 594err_put_group:
 595	iommu_deinit_device(dev);
 596	mutex_unlock(&group->mutex);
 597	iommu_group_put(group);
 598
 599	return ret;
 600}
 601
 602int iommu_probe_device(struct device *dev)
 603{
 604	const struct iommu_ops *ops;
 605	int ret;
 606
 607	mutex_lock(&iommu_probe_device_lock);
 608	ret = __iommu_probe_device(dev, NULL);
 609	mutex_unlock(&iommu_probe_device_lock);
 610	if (ret)
 611		return ret;
 612
 613	ops = dev_iommu_ops(dev);
 614	if (ops->probe_finalize)
 615		ops->probe_finalize(dev);
 616
 617	return 0;
 618}
 619
 620static void __iommu_group_free_device(struct iommu_group *group,
 621				      struct group_device *grp_dev)
 622{
 623	struct device *dev = grp_dev->dev;
 624
 625	sysfs_remove_link(group->devices_kobj, grp_dev->name);
 626	sysfs_remove_link(&dev->kobj, "iommu_group");
 627
 628	trace_remove_device_from_group(group->id, dev);
 629
 630	/*
 631	 * If the group has become empty then ownership must have been
 632	 * released, and the current domain must be set back to NULL or
 633	 * the default domain.
 634	 */
 635	if (list_empty(&group->devices))
 636		WARN_ON(group->owner_cnt ||
 637			group->domain != group->default_domain);
 638
 639	kfree(grp_dev->name);
 640	kfree(grp_dev);
 641}
 642
 643/* Remove the iommu_group from the struct device. */
 644static void __iommu_group_remove_device(struct device *dev)
 645{
 646	struct iommu_group *group = dev->iommu_group;
 647	struct group_device *device;
 648
 649	mutex_lock(&group->mutex);
 650	for_each_group_device(group, device) {
 651		if (device->dev != dev)
 652			continue;
 653
 654		list_del(&device->list);
 655		__iommu_group_free_device(group, device);
 656		if (dev_has_iommu(dev))
 657			iommu_deinit_device(dev);
 658		else
 659			dev->iommu_group = NULL;
 660		break;
 661	}
 662	mutex_unlock(&group->mutex);
 663
 664	/*
 665	 * Pairs with the get in iommu_init_device() or
 666	 * iommu_group_add_device()
 667	 */
 668	iommu_group_put(group);
 669}
 670
 671static void iommu_release_device(struct device *dev)
 672{
 673	struct iommu_group *group = dev->iommu_group;
 674
 675	if (group)
 676		__iommu_group_remove_device(dev);
 677
 678	/* Free any fwspec if no iommu_driver was ever attached */
 679	if (dev->iommu)
 680		dev_iommu_free(dev);
 681}
 682
 683static int __init iommu_set_def_domain_type(char *str)
 684{
 685	bool pt;
 686	int ret;
 687
 688	ret = kstrtobool(str, &pt);
 689	if (ret)
 690		return ret;
 691
 692	if (pt)
 693		iommu_set_default_passthrough(true);
 694	else
 695		iommu_set_default_translated(true);
 696
 697	return 0;
 698}
 699early_param("iommu.passthrough", iommu_set_def_domain_type);
 700
 701static int __init iommu_dma_setup(char *str)
 702{
 703	int ret = kstrtobool(str, &iommu_dma_strict);
 704
 705	if (!ret)
 706		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
 707	return ret;
 708}
 709early_param("iommu.strict", iommu_dma_setup);
 710
 711void iommu_set_dma_strict(void)
 712{
 713	iommu_dma_strict = true;
 714	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
 715		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
 716}
 717
 718static ssize_t iommu_group_attr_show(struct kobject *kobj,
 719				     struct attribute *__attr, char *buf)
 720{
 721	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
 722	struct iommu_group *group = to_iommu_group(kobj);
 723	ssize_t ret = -EIO;
 724
 725	if (attr->show)
 726		ret = attr->show(group, buf);
 727	return ret;
 728}
 729
 730static ssize_t iommu_group_attr_store(struct kobject *kobj,
 731				      struct attribute *__attr,
 732				      const char *buf, size_t count)
 733{
 734	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
 735	struct iommu_group *group = to_iommu_group(kobj);
 736	ssize_t ret = -EIO;
 737
 738	if (attr->store)
 739		ret = attr->store(group, buf, count);
 740	return ret;
 741}
 742
 743static const struct sysfs_ops iommu_group_sysfs_ops = {
 744	.show = iommu_group_attr_show,
 745	.store = iommu_group_attr_store,
 746};
 747
 748static int iommu_group_create_file(struct iommu_group *group,
 749				   struct iommu_group_attribute *attr)
 750{
 751	return sysfs_create_file(&group->kobj, &attr->attr);
 752}
 753
 754static void iommu_group_remove_file(struct iommu_group *group,
 755				    struct iommu_group_attribute *attr)
 756{
 757	sysfs_remove_file(&group->kobj, &attr->attr);
 758}
 759
 760static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
 761{
 762	return sysfs_emit(buf, "%s\n", group->name);
 763}
 764
 765/**
 766 * iommu_insert_resv_region - Insert a new region in the
 767 * list of reserved regions.
 768 * @new: new region to insert
 769 * @regions: list of regions
 770 *
 771 * Elements are sorted by start address and overlapping segments
 772 * of the same type are merged.
 773 */
 774static int iommu_insert_resv_region(struct iommu_resv_region *new,
 775				    struct list_head *regions)
 776{
 777	struct iommu_resv_region *iter, *tmp, *nr, *top;
 778	LIST_HEAD(stack);
 779
 780	nr = iommu_alloc_resv_region(new->start, new->length,
 781				     new->prot, new->type, GFP_KERNEL);
 782	if (!nr)
 783		return -ENOMEM;
 784
 785	/* First add the new element based on start address sorting */
 786	list_for_each_entry(iter, regions, list) {
 787		if (nr->start < iter->start ||
 788		    (nr->start == iter->start && nr->type <= iter->type))
 789			break;
 790	}
 791	list_add_tail(&nr->list, &iter->list);
 792
 793	/* Merge overlapping segments of type nr->type in @regions, if any */
 794	list_for_each_entry_safe(iter, tmp, regions, list) {
 795		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
 796
 797		/* no merge needed on elements of different types than @new */
 798		if (iter->type != new->type) {
 799			list_move_tail(&iter->list, &stack);
 800			continue;
 801		}
 802
 803		/* look for the last stack element of same type as @iter */
 804		list_for_each_entry_reverse(top, &stack, list)
 805			if (top->type == iter->type)
 806				goto check_overlap;
 807
 808		list_move_tail(&iter->list, &stack);
 809		continue;
 810
 811check_overlap:
 812		top_end = top->start + top->length - 1;
 813
 814		if (iter->start > top_end + 1) {
 815			list_move_tail(&iter->list, &stack);
 816		} else {
 817			top->length = max(top_end, iter_end) - top->start + 1;
 818			list_del(&iter->list);
 819			kfree(iter);
 820		}
 821	}
 822	list_splice(&stack, regions);
 823	return 0;
 824}
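/*
 * Worked example (illustrative): with existing regions
 * [0x0000-0x0fff, msi] and [0x3000-0x3fff, direct], inserting
 * [0x2000-0x4fff, direct] first places the copy by start address, then the
 * merge pass folds the overlapping direct region into it, leaving
 * [0x0000-0x0fff, msi] and [0x2000-0x4fff, direct]. Adjacent regions of the
 * same type (iter->start == top_end + 1) merge as well; the msi region is
 * of a different type and is left untouched.
 */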
 825
 826static int
 827iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
 828				 struct list_head *group_resv_regions)
 829{
 830	struct iommu_resv_region *entry;
 831	int ret = 0;
 832
 833	list_for_each_entry(entry, dev_resv_regions, list) {
 834		ret = iommu_insert_resv_region(entry, group_resv_regions);
 835		if (ret)
 836			break;
 837	}
 838	return ret;
 839}
 840
 841int iommu_get_group_resv_regions(struct iommu_group *group,
 842				 struct list_head *head)
 843{
 844	struct group_device *device;
 845	int ret = 0;
 846
 847	mutex_lock(&group->mutex);
 848	for_each_group_device(group, device) {
 849		struct list_head dev_resv_regions;
 850
 851		/*
 852		 * Non-API groups still expose reserved_regions in sysfs,
 853		 * so filter out calls that get here that way.
 854		 */
 855		if (!dev_has_iommu(device->dev))
 856			break;
 857
 858		INIT_LIST_HEAD(&dev_resv_regions);
 859		iommu_get_resv_regions(device->dev, &dev_resv_regions);
 860		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
 861		iommu_put_resv_regions(device->dev, &dev_resv_regions);
 862		if (ret)
 863			break;
 864	}
 865	mutex_unlock(&group->mutex);
 866	return ret;
 867}
 868EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
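/*
 * Example (illustrative sketch; mydrv_dump_resv is hypothetical): the
 * caller owns the filled list and must free each entry itself, as the
 * sysfs show function below does:
 *
 *	static void mydrv_dump_resv(struct iommu_group *group)
 *	{
 *		struct iommu_resv_region *region, *next;
 *		LIST_HEAD(resv);
 *
 *		if (iommu_get_group_resv_regions(group, &resv))
 *			return;
 *
 *		list_for_each_entry_safe(region, next, &resv, list) {
 *			pr_info("resv: %pa + 0x%zx\n", &region->start,
 *				region->length);
 *			kfree(region);
 *		}
 *	}
 */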
 869
 870static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
 871					     char *buf)
 872{
 873	struct iommu_resv_region *region, *next;
 874	struct list_head group_resv_regions;
 875	int offset = 0;
 876
 877	INIT_LIST_HEAD(&group_resv_regions);
 878	iommu_get_group_resv_regions(group, &group_resv_regions);
 879
 880	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
 881		offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n",
 882					(long long)region->start,
 883					(long long)(region->start +
 884						    region->length - 1),
 885					iommu_group_resv_type_string[region->type]);
 886		kfree(region);
 887	}
 888
 889	return offset;
 890}
 891
 892static ssize_t iommu_group_show_type(struct iommu_group *group,
 893				     char *buf)
 894{
 895	char *type = "unknown";
 896
 897	mutex_lock(&group->mutex);
 898	if (group->default_domain) {
 899		switch (group->default_domain->type) {
 900		case IOMMU_DOMAIN_BLOCKED:
 901			type = "blocked";
 902			break;
 903		case IOMMU_DOMAIN_IDENTITY:
 904			type = "identity";
 905			break;
 906		case IOMMU_DOMAIN_UNMANAGED:
 907			type = "unmanaged";
 908			break;
 909		case IOMMU_DOMAIN_DMA:
 910			type = "DMA";
 911			break;
 912		case IOMMU_DOMAIN_DMA_FQ:
 913			type = "DMA-FQ";
 914			break;
 915		}
 916	}
 917	mutex_unlock(&group->mutex);
 918
 919	return sysfs_emit(buf, "%s\n", type);
 920}
 921
  922static IOMMU_GROUP_ATTR(name, 0444, iommu_group_show_name, NULL);
 923
 924static IOMMU_GROUP_ATTR(reserved_regions, 0444,
 925			iommu_group_show_resv_regions, NULL);
 926
 927static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
 928			iommu_group_store_type);
 929
 930static void iommu_group_release(struct kobject *kobj)
 931{
 932	struct iommu_group *group = to_iommu_group(kobj);
 933
 934	pr_debug("Releasing group %d\n", group->id);
 935
 936	if (group->iommu_data_release)
 937		group->iommu_data_release(group->iommu_data);
 938
 939	ida_free(&iommu_group_ida, group->id);
 940
 941	/* Domains are free'd by iommu_deinit_device() */
 942	WARN_ON(group->default_domain);
 943	WARN_ON(group->blocking_domain);
 944
 945	kfree(group->name);
 946	kfree(group);
 947}
 948
 949static const struct kobj_type iommu_group_ktype = {
 950	.sysfs_ops = &iommu_group_sysfs_ops,
 951	.release = iommu_group_release,
 952};
 953
 954/**
 955 * iommu_group_alloc - Allocate a new group
 956 *
 957 * This function is called by an iommu driver to allocate a new iommu
 958 * group.  The iommu group represents the minimum granularity of the iommu.
 959 * Upon successful return, the caller holds a reference to the supplied
 960 * group in order to hold the group until devices are added.  Use
 961 * iommu_group_put() to release this extra reference count, allowing the
 962 * group to be automatically reclaimed once it has no devices or external
 963 * references.
 964 */
 965struct iommu_group *iommu_group_alloc(void)
 966{
 967	struct iommu_group *group;
 968	int ret;
 969
 970	group = kzalloc(sizeof(*group), GFP_KERNEL);
 971	if (!group)
 972		return ERR_PTR(-ENOMEM);
 973
 974	group->kobj.kset = iommu_group_kset;
 975	mutex_init(&group->mutex);
 976	INIT_LIST_HEAD(&group->devices);
 977	INIT_LIST_HEAD(&group->entry);
 978	xa_init(&group->pasid_array);
 979
 980	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
 981	if (ret < 0) {
 982		kfree(group);
 983		return ERR_PTR(ret);
 984	}
 985	group->id = ret;
 986
 987	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
 988				   NULL, "%d", group->id);
 989	if (ret) {
 990		kobject_put(&group->kobj);
 991		return ERR_PTR(ret);
 992	}
 993
 994	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
 995	if (!group->devices_kobj) {
 996		kobject_put(&group->kobj); /* triggers .release & free */
 997		return ERR_PTR(-ENOMEM);
 998	}
 999
1000	/*
1001	 * The devices_kobj holds a reference on the group kobject, so
1002	 * as long as that exists so will the group.  We can therefore
1003	 * use the devices_kobj for reference counting.
1004	 */
1005	kobject_put(&group->kobj);
1006
1007	ret = iommu_group_create_file(group,
1008				      &iommu_group_attr_reserved_regions);
1009	if (ret) {
1010		kobject_put(group->devices_kobj);
1011		return ERR_PTR(ret);
1012	}
1013
1014	ret = iommu_group_create_file(group, &iommu_group_attr_type);
1015	if (ret) {
1016		kobject_put(group->devices_kobj);
1017		return ERR_PTR(ret);
1018	}
1019
1020	pr_debug("Allocated group %d\n", group->id);
1021
1022	return group;
1023}
1024EXPORT_SYMBOL_GPL(iommu_group_alloc);
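/*
 * Example (illustrative sketch; the function and group name below are
 * hypothetical): allocating a group, naming it, and adding a device, with
 * the extra allocation reference dropped on failure.
 *
 *	static struct iommu_group *mydrv_make_named_group(struct device *dev)
 *	{
 *		struct iommu_group *group = iommu_group_alloc();
 *		int ret;
 *
 *		if (IS_ERR(group))
 *			return group;
 *
 *		ret = iommu_group_set_name(group, "mydrv-unit");
 *		if (!ret)
 *			ret = iommu_group_add_device(group, dev);
 *		if (ret) {
 *			iommu_group_put(group);
 *			return ERR_PTR(ret);
 *		}
 *		return group;
 *	}
 */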
1025
1026/**
1027 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
1028 * @group: the group
1029 *
1030 * iommu drivers can store data in the group for use when doing iommu
1031 * operations.  This function provides a way to retrieve it.  Caller
1032 * should hold a group reference.
1033 */
1034void *iommu_group_get_iommudata(struct iommu_group *group)
1035{
1036	return group->iommu_data;
1037}
1038EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
1039
1040/**
1041 * iommu_group_set_iommudata - set iommu_data for a group
1042 * @group: the group
1043 * @iommu_data: new data
1044 * @release: release function for iommu_data
1045 *
1046 * iommu drivers can store data in the group for use when doing iommu
1047 * operations.  This function provides a way to set the data after
1048 * the group has been allocated.  Caller should hold a group reference.
1049 */
1050void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
1051			       void (*release)(void *iommu_data))
1052{
1053	group->iommu_data = iommu_data;
1054	group->iommu_data_release = release;
1055}
1056EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
1057
1058/**
1059 * iommu_group_set_name - set name for a group
1060 * @group: the group
1061 * @name: name
1062 *
1063 * Allow iommu driver to set a name for a group.  When set it will
1064 * appear in a name attribute file under the group in sysfs.
1065 */
1066int iommu_group_set_name(struct iommu_group *group, const char *name)
1067{
1068	int ret;
1069
1070	if (group->name) {
1071		iommu_group_remove_file(group, &iommu_group_attr_name);
1072		kfree(group->name);
1073		group->name = NULL;
1074		if (!name)
1075			return 0;
1076	}
1077
1078	group->name = kstrdup(name, GFP_KERNEL);
1079	if (!group->name)
1080		return -ENOMEM;
1081
1082	ret = iommu_group_create_file(group, &iommu_group_attr_name);
1083	if (ret) {
1084		kfree(group->name);
1085		group->name = NULL;
1086		return ret;
1087	}
1088
1089	return 0;
1090}
1091EXPORT_SYMBOL_GPL(iommu_group_set_name);
1092
1093static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
1094					       struct device *dev)
1095{
1096	struct iommu_resv_region *entry;
1097	struct list_head mappings;
1098	unsigned long pg_size;
1099	int ret = 0;
1100
1101	pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0;
1102	INIT_LIST_HEAD(&mappings);
1103
1104	if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size))
1105		return -EINVAL;
1106
1107	iommu_get_resv_regions(dev, &mappings);
1108
1109	/* We need to consider overlapping regions for different devices */
1110	list_for_each_entry(entry, &mappings, list) {
1111		dma_addr_t start, end, addr;
1112		size_t map_size = 0;
1113
1114		if (entry->type == IOMMU_RESV_DIRECT)
1115			dev->iommu->require_direct = 1;
1116
1117		if ((entry->type != IOMMU_RESV_DIRECT &&
1118		     entry->type != IOMMU_RESV_DIRECT_RELAXABLE) ||
1119		    !iommu_is_dma_domain(domain))
1120			continue;
1121
1122		start = ALIGN(entry->start, pg_size);
1123		end   = ALIGN(entry->start + entry->length, pg_size);
1124
1125		for (addr = start; addr <= end; addr += pg_size) {
1126			phys_addr_t phys_addr;
1127
1128			if (addr == end)
1129				goto map_end;
1130
1131			phys_addr = iommu_iova_to_phys(domain, addr);
1132			if (!phys_addr) {
1133				map_size += pg_size;
1134				continue;
1135			}
1136
1137map_end:
1138			if (map_size) {
1139				ret = iommu_map(domain, addr - map_size,
1140						addr - map_size, map_size,
1141						entry->prot, GFP_KERNEL);
1142				if (ret)
1143					goto out;
1144				map_size = 0;
1145			}
1146		}
1147
1148	}
1149out:
1150	iommu_put_resv_regions(dev, &mappings);
1151
1152	return ret;
1153}
1154
1155/* This is undone by __iommu_group_free_device() */
1156static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
1157						     struct device *dev)
1158{
1159	int ret, i = 0;
1160	struct group_device *device;
1161
1162	device = kzalloc(sizeof(*device), GFP_KERNEL);
1163	if (!device)
1164		return ERR_PTR(-ENOMEM);
1165
1166	device->dev = dev;
1167
1168	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
1169	if (ret)
1170		goto err_free_device;
1171
1172	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
1173rename:
1174	if (!device->name) {
1175		ret = -ENOMEM;
1176		goto err_remove_link;
1177	}
1178
1179	ret = sysfs_create_link_nowarn(group->devices_kobj,
1180				       &dev->kobj, device->name);
1181	if (ret) {
1182		if (ret == -EEXIST && i >= 0) {
1183			/*
1184			 * Account for the slim chance of collision
1185			 * and append an instance to the name.
1186			 */
1187			kfree(device->name);
1188			device->name = kasprintf(GFP_KERNEL, "%s.%d",
1189						 kobject_name(&dev->kobj), i++);
1190			goto rename;
1191		}
1192		goto err_free_name;
1193	}
1194
1195	trace_add_device_to_group(group->id, dev);
1196
1197	dev_info(dev, "Adding to iommu group %d\n", group->id);
1198
1199	return device;
1200
1201err_free_name:
1202	kfree(device->name);
1203err_remove_link:
1204	sysfs_remove_link(&dev->kobj, "iommu_group");
1205err_free_device:
1206	kfree(device);
1207	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
1208	return ERR_PTR(ret);
1209}
1210
1211/**
1212 * iommu_group_add_device - add a device to an iommu group
1213 * @group: the group into which to add the device (reference should be held)
1214 * @dev: the device
1215 *
1216 * This function is called by an iommu driver to add a device into a
1217 * group.  Adding a device increments the group reference count.
1218 */
1219int iommu_group_add_device(struct iommu_group *group, struct device *dev)
1220{
1221	struct group_device *gdev;
1222
1223	gdev = iommu_group_alloc_device(group, dev);
1224	if (IS_ERR(gdev))
1225		return PTR_ERR(gdev);
1226
1227	iommu_group_ref_get(group);
1228	dev->iommu_group = group;
1229
1230	mutex_lock(&group->mutex);
1231	list_add_tail(&gdev->list, &group->devices);
1232	mutex_unlock(&group->mutex);
1233	return 0;
1234}
1235EXPORT_SYMBOL_GPL(iommu_group_add_device);
1236
1237/**
 1238 * iommu_group_remove_device - remove a device from its current group
1239 * @dev: device to be removed
1240 *
1241 * This function is called by an iommu driver to remove the device from
 1242 * its current group.  This decrements the iommu group reference count.
1243 */
1244void iommu_group_remove_device(struct device *dev)
1245{
1246	struct iommu_group *group = dev->iommu_group;
1247
1248	if (!group)
1249		return;
1250
1251	dev_info(dev, "Removing from iommu group %d\n", group->id);
1252
1253	__iommu_group_remove_device(dev);
1254}
1255EXPORT_SYMBOL_GPL(iommu_group_remove_device);
1256
1257#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
1258/**
1259 * iommu_group_mutex_assert - Check device group mutex lock
1260 * @dev: the device that has group param set
1261 *
 1262 * This function is called by an iommu driver to assert that it holds the
 1263 * group mutex for the given device.
 1264 *
 1265 * Note that this function must only be called after dev->iommu_group is set.
1266 */
1267void iommu_group_mutex_assert(struct device *dev)
1268{
1269	struct iommu_group *group = dev->iommu_group;
1270
1271	lockdep_assert_held(&group->mutex);
1272}
1273EXPORT_SYMBOL_GPL(iommu_group_mutex_assert);
1274#endif
1275
1276static struct device *iommu_group_first_dev(struct iommu_group *group)
1277{
1278	lockdep_assert_held(&group->mutex);
1279	return list_first_entry(&group->devices, struct group_device, list)->dev;
1280}
1281
1282/**
1283 * iommu_group_for_each_dev - iterate over each device in the group
1284 * @group: the group
1285 * @data: caller opaque data to be passed to callback function
1286 * @fn: caller supplied callback function
1287 *
1288 * This function is called by group users to iterate over group devices.
1289 * Callers should hold a reference count to the group during callback.
1290 * The group->mutex is held across callbacks, which will block calls to
1291 * iommu_group_add/remove_device.
1292 */
1293int iommu_group_for_each_dev(struct iommu_group *group, void *data,
1294			     int (*fn)(struct device *, void *))
1295{
1296	struct group_device *device;
1297	int ret = 0;
1298
1299	mutex_lock(&group->mutex);
1300	for_each_group_device(group, device) {
1301		ret = fn(device->dev, data);
1302		if (ret)
1303			break;
1304	}
1305	mutex_unlock(&group->mutex);
1306
1307	return ret;
1308}
1309EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
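/*
 * Example (illustrative sketch; mydrv_* names are hypothetical): counting
 * group members. The callback runs under group->mutex, so it must not call
 * back into anything that takes the group mutex, such as
 * iommu_group_add_device() or iommu_group_remove_device().
 *
 *	static int mydrv_count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// non-zero would stop the iteration
 *	}
 *
 *	static int mydrv_count_devices(struct iommu_group *group)
 *	{
 *		int count = 0;
 *
 *		iommu_group_for_each_dev(group, &count, mydrv_count_one);
 *		return count;
 *	}
 */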
1310
1311/**
1312 * iommu_group_get - Return the group for a device and increment reference
1313 * @dev: get the group that this device belongs to
1314 *
1315 * This function is called by iommu drivers and users to get the group
1316 * for the specified device.  If found, the group is returned and the group
 1317 * reference is incremented, else NULL is returned.
1318 */
1319struct iommu_group *iommu_group_get(struct device *dev)
1320{
1321	struct iommu_group *group = dev->iommu_group;
1322
1323	if (group)
1324		kobject_get(group->devices_kobj);
1325
1326	return group;
1327}
1328EXPORT_SYMBOL_GPL(iommu_group_get);
1329
1330/**
1331 * iommu_group_ref_get - Increment reference on a group
1332 * @group: the group to use, must not be NULL
1333 *
1334 * This function is called by iommu drivers to take additional references on an
1335 * existing group.  Returns the given group for convenience.
1336 */
1337struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
1338{
1339	kobject_get(group->devices_kobj);
1340	return group;
1341}
1342EXPORT_SYMBOL_GPL(iommu_group_ref_get);
1343
1344/**
1345 * iommu_group_put - Decrement group reference
1346 * @group: the group to use
1347 *
1348 * This function is called by iommu drivers and users to release the
1349 * iommu group.  Once the reference count is zero, the group is released.
1350 */
1351void iommu_group_put(struct iommu_group *group)
1352{
1353	if (group)
1354		kobject_put(group->devices_kobj);
1355}
1356EXPORT_SYMBOL_GPL(iommu_group_put);
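/*
 * Example (illustrative sketch; mydrv_log_group is hypothetical): the
 * usual get/put pattern - iommu_group_get() may return NULL, and every
 * successful get must be balanced by a put.
 *
 *	static void mydrv_log_group(struct device *dev)
 *	{
 *		struct iommu_group *group = iommu_group_get(dev);
 *
 *		if (!group)
 *			return;
 *		dev_info(dev, "in iommu group %d\n", iommu_group_id(group));
 *		iommu_group_put(group);
 *	}
 */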
1357
1358/**
1359 * iommu_group_id - Return ID for a group
1360 * @group: the group to ID
1361 *
1362 * Return the unique ID for the group matching the sysfs group number.
1363 */
1364int iommu_group_id(struct iommu_group *group)
1365{
1366	return group->id;
1367}
1368EXPORT_SYMBOL_GPL(iommu_group_id);
1369
1370static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1371					       unsigned long *devfns);
1372
1373/*
1374 * To consider a PCI device isolated, we require ACS to support Source
1375 * Validation, Request Redirection, Completer Redirection, and Upstream
1376 * Forwarding.  This effectively means that devices cannot spoof their
1377 * requester ID, requests and completions cannot be redirected, and all
 1378 * transactions are forwarded upstream, even when they pass through a
 1379 * bridge where the target device is downstream.
1380 */
1381#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1382
1383/*
1384 * For multifunction devices which are not isolated from each other, find
1385 * all the other non-isolated functions and look for existing groups.  For
1386 * each function, we also need to look for aliases to or from other devices
1387 * that may already have a group.
1388 */
1389static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1390							unsigned long *devfns)
1391{
1392	struct pci_dev *tmp = NULL;
1393	struct iommu_group *group;
1394
1395	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1396		return NULL;
1397
1398	for_each_pci_dev(tmp) {
1399		if (tmp == pdev || tmp->bus != pdev->bus ||
1400		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1401		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1402			continue;
1403
1404		group = get_pci_alias_group(tmp, devfns);
1405		if (group) {
1406			pci_dev_put(tmp);
1407			return group;
1408		}
1409	}
1410
1411	return NULL;
1412}
1413
1414/*
1415 * Look for aliases to or from the given device for existing groups. DMA
1416 * aliases are only supported on the same bus, therefore the search
 1417 * space is quite small (especially since we're really only looking at PCIe
 1418 * devices, and therefore only expect multiple slots on the root complex or
1419 * downstream switch ports).  It's conceivable though that a pair of
1420 * multifunction devices could have aliases between them that would cause a
1421 * loop.  To prevent this, we use a bitmap to track where we've been.
1422 */
1423static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1424					       unsigned long *devfns)
1425{
1426	struct pci_dev *tmp = NULL;
1427	struct iommu_group *group;
1428
1429	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1430		return NULL;
1431
1432	group = iommu_group_get(&pdev->dev);
1433	if (group)
1434		return group;
1435
1436	for_each_pci_dev(tmp) {
1437		if (tmp == pdev || tmp->bus != pdev->bus)
1438			continue;
1439
1440		/* We alias them or they alias us */
1441		if (pci_devs_are_dma_aliases(pdev, tmp)) {
1442			group = get_pci_alias_group(tmp, devfns);
1443			if (group) {
1444				pci_dev_put(tmp);
1445				return group;
1446			}
1447
1448			group = get_pci_function_alias_group(tmp, devfns);
1449			if (group) {
1450				pci_dev_put(tmp);
1451				return group;
1452			}
1453		}
1454	}
1455
1456	return NULL;
1457}
1458
1459struct group_for_pci_data {
1460	struct pci_dev *pdev;
1461	struct iommu_group *group;
1462};
1463
1464/*
1465 * DMA alias iterator callback, return the last seen device.  Stop and return
1466 * the IOMMU group if we find one along the way.
1467 */
1468static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1469{
1470	struct group_for_pci_data *data = opaque;
1471
1472	data->pdev = pdev;
1473	data->group = iommu_group_get(&pdev->dev);
1474
1475	return data->group != NULL;
1476}
1477
1478/*
1479 * Generic device_group call-back function. It just allocates one
1480 * iommu-group per device.
1481 */
1482struct iommu_group *generic_device_group(struct device *dev)
1483{
1484	return iommu_group_alloc();
1485}
1486EXPORT_SYMBOL_GPL(generic_device_group);
1487
1488/*
 1489 * Generic device_group call-back function. It allocates one
 1490 * iommu group per IOMMU driver instance, shared by every device
 1491 * probed by that instance.
1492 */
1493struct iommu_group *generic_single_device_group(struct device *dev)
1494{
1495	struct iommu_device *iommu = dev->iommu->iommu_dev;
1496
1497	if (!iommu->singleton_group) {
1498		struct iommu_group *group;
1499
1500		group = iommu_group_alloc();
1501		if (IS_ERR(group))
1502			return group;
1503		iommu->singleton_group = group;
1504	}
1505	return iommu_group_ref_get(iommu->singleton_group);
1506}
1507EXPORT_SYMBOL_GPL(generic_single_device_group);
1508
1509/*
1510 * Use standard PCI bus topology, isolation features, and DMA alias quirks
1511 * to find or create an IOMMU group for a device.
1512 */
1513struct iommu_group *pci_device_group(struct device *dev)
1514{
1515	struct pci_dev *pdev = to_pci_dev(dev);
1516	struct group_for_pci_data data;
1517	struct pci_bus *bus;
1518	struct iommu_group *group = NULL;
1519	u64 devfns[4] = { 0 };
1520
1521	if (WARN_ON(!dev_is_pci(dev)))
1522		return ERR_PTR(-EINVAL);
1523
1524	/*
1525	 * Find the upstream DMA alias for the device.  A device must not
1526	 * be aliased due to topology in order to have its own IOMMU group.
1527	 * If we find an alias along the way that already belongs to a
1528	 * group, use it.
1529	 */
1530	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1531		return data.group;
1532
1533	pdev = data.pdev;
1534
1535	/*
1536	 * Continue upstream from the point of minimum IOMMU granularity
1537	 * due to aliases to the point where devices are protected from
1538	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
1539	 * group, use it.
1540	 */
1541	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1542		if (!bus->self)
1543			continue;
1544
1545		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1546			break;
1547
1548		pdev = bus->self;
1549
1550		group = iommu_group_get(&pdev->dev);
1551		if (group)
1552			return group;
1553	}
1554
1555	/*
1556	 * Look for existing groups on device aliases.  If we alias another
1557	 * device or another device aliases us, use the same group.
1558	 */
1559	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1560	if (group)
1561		return group;
1562
1563	/*
1564	 * Look for existing groups on non-isolated functions on the same
 1565 * slot and aliases of those functions, if any.  No need to clear
1566	 * the search bitmap, the tested devfns are still valid.
1567	 */
1568	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1569	if (group)
1570		return group;
1571
1572	/* No shared group found, allocate new */
1573	return iommu_group_alloc();
1574}
1575EXPORT_SYMBOL_GPL(pci_device_group);
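/*
 * Example (illustrative; mydrv_ops is hypothetical): drivers for PCI-based
 * IOMMUs normally plug this helper straight into their ops rather than
 * open-coding the topology walk:
 *
 *	static const struct iommu_ops mydrv_ops = {
 *		.device_group	= pci_device_group,
 *		// ... other callbacks ...
 *	};
 */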
1576
1577/* Get the IOMMU group for device on fsl-mc bus */
1578struct iommu_group *fsl_mc_device_group(struct device *dev)
1579{
1580	struct device *cont_dev = fsl_mc_cont_dev(dev);
1581	struct iommu_group *group;
1582
1583	group = iommu_group_get(cont_dev);
1584	if (!group)
1585		group = iommu_group_alloc();
1586	return group;
1587}
1588EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1589
1590static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
1591{
1592	const struct iommu_ops *ops = dev_iommu_ops(dev);
1593	struct iommu_domain *domain;
1594
1595	if (ops->identity_domain)
1596		return ops->identity_domain;
1597
1598	/* Older drivers create the identity domain via ops->domain_alloc() */
1599	if (!ops->domain_alloc)
1600		return ERR_PTR(-EOPNOTSUPP);
1601
1602	domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
1603	if (IS_ERR(domain))
1604		return domain;
1605	if (!domain)
1606		return ERR_PTR(-ENOMEM);
1607
1608	iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
1609	return domain;
1610}
1611
1612static struct iommu_domain *
1613__iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
1614{
1615	struct device *dev = iommu_group_first_dev(group);
1616	struct iommu_domain *dom;
1617
1618	if (group->default_domain && group->default_domain->type == req_type)
1619		return group->default_domain;
1620
1621	/*
1622	 * When allocating the DMA API domain assume that the driver is going to
1623	 * use PASID and make sure the RID's domain is PASID compatible.
1624	 */
1625	if (req_type & __IOMMU_DOMAIN_PAGING) {
1626		dom = __iommu_paging_domain_alloc_flags(dev, req_type,
1627			   dev->iommu->max_pasids ? IOMMU_HWPT_ALLOC_PASID : 0);
1628
1629		/*
1630		 * If driver does not support PASID feature then
1631		 * try to allocate non-PASID domain
1632		 */
1633		if (PTR_ERR(dom) == -EOPNOTSUPP)
1634			dom = __iommu_paging_domain_alloc_flags(dev, req_type, 0);
1635
1636		return dom;
1637	}
1638
1639	if (req_type == IOMMU_DOMAIN_IDENTITY)
1640		return __iommu_alloc_identity_domain(dev);
1641
1642	return ERR_PTR(-EINVAL);
1643}
1644
1645/*
1646 * req_type of 0 means "auto" which means to select a domain based on
1647 * iommu_def_domain_type or what the driver actually supports.
1648 */
1649static struct iommu_domain *
1650iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
1651{
1652	const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group));
1653	struct iommu_domain *dom;
1654
1655	lockdep_assert_held(&group->mutex);
1656
1657	/*
1658	 * Allow legacy drivers to specify the domain that will be the default
1659	 * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM
1660	 * domain. Do not use in new drivers.
1661	 */
1662	if (ops->default_domain) {
1663		if (req_type != ops->default_domain->type)
1664			return ERR_PTR(-EINVAL);
1665		return ops->default_domain;
1666	}
1667
1668	if (req_type)
1669		return __iommu_group_alloc_default_domain(group, req_type);
1670
1671	/* The driver gave no guidance on what type to use, try the default */
1672	dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
1673	if (!IS_ERR(dom))
1674		return dom;
1675
1676	/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
1677	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
1678		return ERR_PTR(-EINVAL);
1679	dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
1680	if (IS_ERR(dom))
1681		return dom;
1682
 1683	pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
1684		iommu_def_domain_type, group->name);
1685	return dom;
1686}
1687
1688struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1689{
1690	return group->default_domain;
1691}
1692
1693static int probe_iommu_group(struct device *dev, void *data)
1694{
1695	struct list_head *group_list = data;
1696	int ret;
1697
1698	mutex_lock(&iommu_probe_device_lock);
1699	ret = __iommu_probe_device(dev, group_list);
1700	mutex_unlock(&iommu_probe_device_lock);
1701	if (ret == -ENODEV)
1702		ret = 0;
1703
1704	return ret;
1705}
1706
1707static int iommu_bus_notifier(struct notifier_block *nb,
1708			      unsigned long action, void *data)
1709{
1710	struct device *dev = data;
1711
1712	if (action == BUS_NOTIFY_ADD_DEVICE) {
1713		int ret;
1714
1715		ret = iommu_probe_device(dev);
1716		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1717	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1718		iommu_release_device(dev);
1719		return NOTIFY_OK;
1720	}
1721
1722	return 0;
1723}
1724
1725/*
1726 * Combine the driver's chosen def_domain_type across all the devices in a
1727 * group. Drivers must give a consistent result.
1728 */
1729static int iommu_get_def_domain_type(struct iommu_group *group,
1730				     struct device *dev, int cur_type)
1731{
1732	const struct iommu_ops *ops = dev_iommu_ops(dev);
1733	int type;
1734
1735	if (ops->default_domain) {
1736		/*
1737		 * Drivers that declare a global static default_domain will
1738		 * always choose that.
1739		 */
1740		type = ops->default_domain->type;
1741	} else {
1742		if (ops->def_domain_type)
1743			type = ops->def_domain_type(dev);
1744		else
1745			return cur_type;
1746	}
1747	if (!type || cur_type == type)
1748		return cur_type;
1749	if (!cur_type)
1750		return type;
1751
1752	dev_err_ratelimited(
1753		dev,
1754		"IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n",
1755		iommu_domain_type_str(cur_type), iommu_domain_type_str(type),
1756		group->id);
1757
1758	/*
 1759	 * Try to recover: drivers are allowed to force IDENTITY or DMA; IDENTITY
1760	 * takes precedence.
1761	 */
1762	if (type == IOMMU_DOMAIN_IDENTITY)
1763		return type;
1764	return cur_type;
1765}
1766
1767/*
1768 * A target_type of 0 will select the best domain type. 0 can be returned in
1769 * this case meaning the global default should be used.
1770 */
1771static int iommu_get_default_domain_type(struct iommu_group *group,
1772					 int target_type)
1773{
1774	struct device *untrusted = NULL;
1775	struct group_device *gdev;
1776	int driver_type = 0;
1777
1778	lockdep_assert_held(&group->mutex);
1779
1780	/*
1781	 * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an
1782	 * identity_domain and it will automatically become their default
1783	 * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain.
1784	 * Override the selection to IDENTITY.
1785	 */
1786	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
1787		static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) &&
1788				IS_ENABLED(CONFIG_IOMMU_DMA)));
1789		driver_type = IOMMU_DOMAIN_IDENTITY;
1790	}
1791
1792	for_each_group_device(group, gdev) {
1793		driver_type = iommu_get_def_domain_type(group, gdev->dev,
1794							driver_type);
1795
1796		if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) {
1797			/*
 1798			 * No ARM32-using systems will set untrusted; it cannot
 1799			 * work.
1800			 */
1801			if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)))
1802				return -1;
1803			untrusted = gdev->dev;
1804		}
1805	}
1806
1807	/*
1808	 * If the common dma ops are not selected in kconfig then we cannot use
1809	 * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been
1810	 * selected.
1811	 */
1812	if (!IS_ENABLED(CONFIG_IOMMU_DMA)) {
1813		if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA))
1814			return -1;
1815		if (!driver_type)
1816			driver_type = IOMMU_DOMAIN_IDENTITY;
1817	}
1818
1819	if (untrusted) {
1820		if (driver_type && driver_type != IOMMU_DOMAIN_DMA) {
1821			dev_err_ratelimited(
1822				untrusted,
1823				"Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n",
1824				group->id, iommu_domain_type_str(driver_type));
1825			return -1;
1826		}
1827		driver_type = IOMMU_DOMAIN_DMA;
1828	}
1829
1830	if (target_type) {
1831		if (driver_type && target_type != driver_type)
1832			return -1;
1833		return target_type;
1834	}
1835	return driver_type;
1836}
1837
1838static void iommu_group_do_probe_finalize(struct device *dev)
1839{
1840	const struct iommu_ops *ops = dev_iommu_ops(dev);
1841
1842	if (ops->probe_finalize)
1843		ops->probe_finalize(dev);
1844}
1845
1846static int bus_iommu_probe(const struct bus_type *bus)
1847{
1848	struct iommu_group *group, *next;
1849	LIST_HEAD(group_list);
1850	int ret;
1851
1852	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1853	if (ret)
1854		return ret;
1855
1856	list_for_each_entry_safe(group, next, &group_list, entry) {
1857		struct group_device *gdev;
1858
1859		mutex_lock(&group->mutex);
1860
1861		/* Remove item from the list */
1862		list_del_init(&group->entry);
1863
1864		/*
1865		 * We go to the trouble of deferred default domain creation so
1866		 * that the cross-group default domain type and the setup of the
 1867		 * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
1868		 */
1869		ret = iommu_setup_default_domain(group, 0);
1870		if (ret) {
1871			mutex_unlock(&group->mutex);
1872			return ret;
1873		}
1874		for_each_group_device(group, gdev)
1875			iommu_setup_dma_ops(gdev->dev);
1876		mutex_unlock(&group->mutex);
1877
1878		/*
1879		 * FIXME: Mis-locked because the ops->probe_finalize() call-back
1880		 * of some IOMMU drivers calls arm_iommu_attach_device() which
1881		 * in-turn might call back into IOMMU core code, where it tries
1882		 * to take group->mutex, resulting in a deadlock.
1883		 */
1884		for_each_group_device(group, gdev)
1885			iommu_group_do_probe_finalize(gdev->dev);
1886	}
1887
1888	return 0;
1889}
1890
1891/**
1892 * device_iommu_capable() - check for a general IOMMU capability
1893 * @dev: device to which the capability would be relevant, if available
1894 * @cap: IOMMU capability
1895 *
1896 * Return: true if an IOMMU is present and supports the given capability
1897 * for the given device, otherwise false.
1898 */
1899bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
1900{
1901	const struct iommu_ops *ops;
1902
1903	if (!dev_has_iommu(dev))
1904		return false;
1905
1906	ops = dev_iommu_ops(dev);
1907	if (!ops->capable)
1908		return false;
1909
1910	return ops->capable(dev, cap);
1911}
1912EXPORT_SYMBOL_GPL(device_iommu_capable);
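/*
 * Example (illustrative sketch; mydrv_dma_is_coherent is hypothetical):
 * testing one of the enum iommu_cap values before relying on coherent DMA
 * through the IOMMU.
 *
 *	static bool mydrv_dma_is_coherent(struct device *dev)
 *	{
 *		return device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY);
 *	}
 */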
1913
1914/**
1915 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
1916 *       for a group
1917 * @group: Group to query
1918 *
1919 * IOMMU groups should not have differing values of
1920 * msi_device_has_isolated_msi() for devices in a group. However nothing
1921 * directly prevents this, so ensure mistakes don't result in isolation failures
1922 * by checking that all the devices are the same.
1923 */
1924bool iommu_group_has_isolated_msi(struct iommu_group *group)
1925{
1926	struct group_device *group_dev;
1927	bool ret = true;
1928
1929	mutex_lock(&group->mutex);
1930	for_each_group_device(group, group_dev)
1931		ret &= msi_device_has_isolated_msi(group_dev->dev);
1932	mutex_unlock(&group->mutex);
1933	return ret;
1934}
1935EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi);
1936
1937/**
1938 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1939 * @domain: iommu domain
1940 * @handler: fault handler
1941 * @token: user data, will be passed back to the fault handler
1942 *
1943 * This function should be used by IOMMU users which want to be notified
1944 * whenever an IOMMU fault happens.
1945 *
1946 * The fault handler itself should return 0 on success, and an appropriate
1947 * error code otherwise.
1948 */
1949void iommu_set_fault_handler(struct iommu_domain *domain,
1950					iommu_fault_handler_t handler,
1951					void *token)
1952{
1953	BUG_ON(!domain);
1954
1955	domain->handler = handler;
1956	domain->handler_token = token;
1957}
1958EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
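/*
 * Example (illustrative sketch; mydrv_fault_handler is hypothetical): a
 * report-only handler matching iommu_fault_handler_t. Returning non-zero
 * tells the driver the fault was not handled and it should apply its
 * default policy.
 *
 *	static int mydrv_fault_handler(struct iommu_domain *domain,
 *				       struct device *dev, unsigned long iova,
 *				       int flags, void *token)
 *	{
 *		dev_err_ratelimited(dev, "fault at IOVA %lx (flags %x)\n",
 *				    iova, flags);
 *		return -ENOSYS;
 *	}
 *
 *	// registered as:
 *	// iommu_set_fault_handler(domain, mydrv_fault_handler, NULL);
 */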
1959
1960static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
1961			      const struct iommu_ops *ops)
1962{
1963	domain->type = type;
1964	domain->owner = ops;
1965	if (!domain->ops)
1966		domain->ops = ops->default_domain_ops;
1967
1968	/*
1969	 * If not already set, assume all sizes by default; the driver
1970	 * may override this later
1971	 */
1972	if (!domain->pgsize_bitmap)
1973		domain->pgsize_bitmap = ops->pgsize_bitmap;
1974}
1975
1976static struct iommu_domain *
1977__iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
1978				  unsigned int flags)
1979{
1980	const struct iommu_ops *ops;
1981	struct iommu_domain *domain;
1982
1983	if (!dev_has_iommu(dev))
1984		return ERR_PTR(-ENODEV);
1985
1986	ops = dev_iommu_ops(dev);
1987
1988	if (ops->domain_alloc_paging && !flags)
1989		domain = ops->domain_alloc_paging(dev);
1990	else if (ops->domain_alloc_paging_flags)
1991		domain = ops->domain_alloc_paging_flags(dev, flags, NULL);
1992	else if (ops->domain_alloc && !flags)
1993		domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
1994	else
1995		return ERR_PTR(-EOPNOTSUPP);
1996
1997	if (IS_ERR(domain))
1998		return domain;
1999	if (!domain)
2000		return ERR_PTR(-ENOMEM);
2001
2002	iommu_domain_init(domain, type, ops);
2003	return domain;
2004}
2005
2006/**
2007 * iommu_paging_domain_alloc_flags() - Allocate a paging domain
2008 * @dev: device for which the domain is allocated
2009 * @flags: Bitmap of iommufd_hwpt_alloc_flags
2010 *
2011 * Allocate a paging domain which will be managed by a kernel driver. Return
2012 * allocated domain if successful, or an ERR pointer for failure.
2013 */
2014struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
2015						     unsigned int flags)
2016{
2017	return __iommu_paging_domain_alloc_flags(dev,
2018					 IOMMU_DOMAIN_UNMANAGED, flags);
2019}
2020EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags);
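/*
 * Example (illustrative sketch): unlike the legacy ->domain_alloc() path,
 * failure here is reported as an ERR_PTR(), never NULL, and flags == 0
 * requests a plain kernel-owned paging domain:
 *
 *	struct iommu_domain *domain = iommu_paging_domain_alloc_flags(dev, 0);
 *
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *	// ... use the domain, then iommu_domain_free(domain) ...
 */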
2021
2022void iommu_domain_free(struct iommu_domain *domain)
2023{
2024	if (domain->type == IOMMU_DOMAIN_SVA)
2025		mmdrop(domain->mm);
2026	iommu_put_dma_cookie(domain);
2027	if (domain->ops->free)
2028		domain->ops->free(domain);
2029}
2030EXPORT_SYMBOL_GPL(iommu_domain_free);
2031
2032/*
2033 * Put the group's domain back to the appropriate core-owned domain - either the
2034 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
2035 */
2036static void __iommu_group_set_core_domain(struct iommu_group *group)
2037{
2038	struct iommu_domain *new_domain;
2039
2040	if (group->owner)
2041		new_domain = group->blocking_domain;
2042	else
2043		new_domain = group->default_domain;
2044
2045	__iommu_group_set_domain_nofail(group, new_domain);
2046}
2047
2048static int __iommu_attach_device(struct iommu_domain *domain,
2049				 struct device *dev)
2050{
2051	int ret;
2052
2053	if (unlikely(domain->ops->attach_dev == NULL))
2054		return -ENODEV;
2055
2056	ret = domain->ops->attach_dev(domain, dev);
2057	if (ret)
2058		return ret;
2059	dev->iommu->attach_deferred = 0;
2060	trace_attach_device_to_domain(dev);
2061	return 0;
2062}
2063
2064/**
2065 * iommu_attach_device - Attach an IOMMU domain to a device
2066 * @domain: IOMMU domain to attach
2067 * @dev: Device that will be attached
2068 *
2069 * Returns 0 on success and error code on failure
2070 *
2071 * Note that EINVAL can be treated as a soft failure, indicating
2072 * that certain configuration of the domain is incompatible with
2073 * the device. In this case attaching a different domain to the
2074 * device may succeed.
2075 */
2076int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
2077{
2078	/* Caller must be a probed driver on dev */
2079	struct iommu_group *group = dev->iommu_group;
2080	int ret;
2081
2082	if (!group)
2083		return -ENODEV;
2084
2085	/*
2086	 * Lock the group to make sure the device-count doesn't
2087	 * change while we are attaching
2088	 */
2089	mutex_lock(&group->mutex);
2090	ret = -EINVAL;
2091	if (list_count_nodes(&group->devices) != 1)
2092		goto out_unlock;
2093
2094	ret = __iommu_attach_group(domain, group);
2095
2096out_unlock:
2097	mutex_unlock(&group->mutex);
2098	return ret;
2099}
2100EXPORT_SYMBOL_GPL(iommu_attach_device);
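/*
 * Example (illustrative sketch; mydrv_use_domain, the IOVA and the size
 * are hypothetical): an end-to-end attach for a single-device group, which
 * is the only case this API permits.
 *
 *	static int mydrv_use_domain(struct device *dev, phys_addr_t paddr)
 *	{
 *		struct iommu_domain *domain;
 *		int ret;
 *
 *		domain = iommu_paging_domain_alloc_flags(dev, 0);
 *		if (IS_ERR(domain))
 *			return PTR_ERR(domain);
 *
 *		ret = iommu_attach_device(domain, dev);
 *		if (ret)
 *			goto out_free;
 *
 *		ret = iommu_map(domain, 0x10000, paddr, SZ_4K,
 *				IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *		if (ret)
 *			goto out_detach;
 *
 *		// ... DMA through the mapping, then tear down ...
 *
 *		iommu_unmap(domain, 0x10000, SZ_4K);
 *	out_detach:
 *		iommu_detach_device(domain, dev);
 *	out_free:
 *		iommu_domain_free(domain);
 *		return ret;
 *	}
 */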
2101
2102int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
2103{
2104	if (dev->iommu && dev->iommu->attach_deferred)
2105		return __iommu_attach_device(domain, dev);
2106
2107	return 0;
2108}
2109
2110void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2111{
2112	/* Caller must be a probed driver on dev */
2113	struct iommu_group *group = dev->iommu_group;
2114
2115	if (!group)
2116		return;
2117
2118	mutex_lock(&group->mutex);
2119	if (WARN_ON(domain != group->domain) ||
2120	    WARN_ON(list_count_nodes(&group->devices) != 1))
2121		goto out_unlock;
2122	__iommu_group_set_core_domain(group);
2123
2124out_unlock:
2125	mutex_unlock(&group->mutex);
2126}
2127EXPORT_SYMBOL_GPL(iommu_detach_device);
2128
2129struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2130{
2131	/* Caller must be a probed driver on dev */
2132	struct iommu_group *group = dev->iommu_group;
2133
2134	if (!group)
2135		return NULL;
2136
2137	return group->domain;
2138}
2139EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2140
2141/*
 2142 * For use by IOMMU_DOMAIN_DMA implementations which already provide their
 2143 * own guarantees that the group and its default domain are valid and correct.
2144 */
2145struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2146{
2147	return dev->iommu_group->default_domain;
2148}
2149
2150static int __iommu_attach_group(struct iommu_domain *domain,
2151				struct iommu_group *group)
2152{
2153	struct device *dev;
2154
2155	if (group->domain && group->domain != group->default_domain &&
2156	    group->domain != group->blocking_domain)
2157		return -EBUSY;
2158
2159	dev = iommu_group_first_dev(group);
2160	if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
2161		return -EINVAL;
2162
2163	return __iommu_group_set_domain(group, domain);
2164}
2165
2166/**
2167 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
2168 * @domain: IOMMU domain to attach
2169 * @group: IOMMU group that will be attached
2170 *
2171 * Returns 0 on success and error code on failure
2172 *
2173 * Note that EINVAL can be treated as a soft failure, indicating
2174 * that certain configuration of the domain is incompatible with
2175 * the group. In this case attaching a different domain to the
2176 * group may succeed.
2177 */
2178int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2179{
2180	int ret;
2181
2182	mutex_lock(&group->mutex);
2183	ret = __iommu_attach_group(domain, group);
2184	mutex_unlock(&group->mutex);
2185
2186	return ret;
2187}
2188EXPORT_SYMBOL_GPL(iommu_attach_group);
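/*
 * Example (editor's sketch): the "soft failure" retry pattern described in
 * the kernel-doc above. On -EINVAL a caller may retry with a differently
 * configured domain. The function name is hypothetical and both domain
 * pointers are assumed to be supplied by the caller.
 */
static int example_attach_with_fallback(struct iommu_group *group,
					struct iommu_domain *preferred,
					struct iommu_domain *fallback)
{
	int ret;

	ret = iommu_attach_group(preferred, group);
	if (ret != -EINVAL)
		return ret;

	/* The preferred configuration is incompatible; try the fallback. */
	return iommu_attach_group(fallback, group);
}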
2189
2190/**
2191 * iommu_group_replace_domain - replace the domain that a group is attached to
2192 * @group: IOMMU group that will be attached to the new domain
2193 * @new_domain: new IOMMU domain to replace with
2194 *
2195 * This API allows the group to switch domains without being forced to go to
2196 * the blocking domain in-between.
2197 *
2198 * If the currently attached domain is a core domain (e.g. a default_domain),
2199 * it will act just like iommu_attach_group().
2200 */
2201int iommu_group_replace_domain(struct iommu_group *group,
2202			       struct iommu_domain *new_domain)
2203{
2204	int ret;
2205
2206	if (!new_domain)
2207		return -EINVAL;
2208
2209	mutex_lock(&group->mutex);
2210	ret = __iommu_group_set_domain(group, new_domain);
2211	mutex_unlock(&group->mutex);
2212	return ret;
2213}
2214EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, "IOMMUFD_INTERNAL");
2215
2216static int __iommu_device_set_domain(struct iommu_group *group,
2217				     struct device *dev,
2218				     struct iommu_domain *new_domain,
2219				     unsigned int flags)
2220{
2221	int ret;
2222
2223	/*
2224	 * If the device requires IOMMU_RESV_DIRECT then we cannot allow
2225	 * the blocking domain to be attached as it does not contain the
2226	 * required 1:1 mapping. This test effectively excludes the device
2227	 * being used with iommu_group_claim_dma_owner() which will block
2228	 * vfio and iommufd as well.
2229	 */
2230	if (dev->iommu->require_direct &&
2231	    (new_domain->type == IOMMU_DOMAIN_BLOCKED ||
2232	     new_domain == group->blocking_domain)) {
2233		dev_warn(dev,
2234			 "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n");
2235		return -EINVAL;
2236	}
2237
2238	if (dev->iommu->attach_deferred) {
2239		if (new_domain == group->default_domain)
2240			return 0;
2241		dev->iommu->attach_deferred = 0;
2242	}
2243
2244	ret = __iommu_attach_device(new_domain, dev);
2245	if (ret) {
2246		/*
2247		 * If we have a blocking domain then try to attach that in hopes
2248		 * of avoiding a UAF. Modern drivers should implement blocking
2249		 * domains as global statics that cannot fail.
2250		 */
2251		if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
2252		    group->blocking_domain &&
2253		    group->blocking_domain != new_domain)
2254			__iommu_attach_device(group->blocking_domain, dev);
2255		return ret;
2256	}
2257	return 0;
2258}
2259
2260/*
2261 * If 0 is returned the group's domain is new_domain. If an error is returned
2262 * then the group's domain will be set back to the existing domain unless
2263 * IOMMU_SET_DOMAIN_MUST_SUCCEED is set, in which case the group's domain is
2264 * left inconsistent. It is a driver bug to fail attach with a
2265 * previously good domain. We try to avoid a kernel UAF because of this.
2266 *
2267 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU
2268 * API works on domains and devices.  Bridge that gap by iterating over the
2269 * devices in a group.  Ideally we'd have a single device which represents the
2270 * requestor ID of the group, but we also allow IOMMU drivers to create policy
2271 * defined minimum sets, where the physical hardware may be able to distinguish
2272 * members, but we wish to group them at a higher level (ex. untrusted
2273 * multi-function PCI devices).  Thus we attach each device.
2274 */
2275static int __iommu_group_set_domain_internal(struct iommu_group *group,
2276					     struct iommu_domain *new_domain,
2277					     unsigned int flags)
2278{
2279	struct group_device *last_gdev;
2280	struct group_device *gdev;
2281	int result;
2282	int ret;
2283
2284	lockdep_assert_held(&group->mutex);
2285
2286	if (group->domain == new_domain)
2287		return 0;
2288
2289	if (WARN_ON(!new_domain))
2290		return -EINVAL;
2291
2292	/*
2293	 * Changing the domain is done by calling attach_dev() on the new
2294	 * domain. This switch does not have to be atomic and DMA can be
2295	 * discarded during the transition. DMA must only be able to access
2296	 * either new_domain or group->domain, never something else.
2297	 */
2298	result = 0;
2299	for_each_group_device(group, gdev) {
2300		ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
2301						flags);
2302		if (ret) {
2303			result = ret;
2304			/*
2305			 * Keep trying the other devices in the group. If a
2306			 * driver fails attach to an otherwise good domain, and
2307			 * does not support blocking domains, it should at least
2308			 * drop its reference on the current domain so we don't
2309			 * UAF.
2310			 */
2311			if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED)
2312				continue;
2313			goto err_revert;
2314		}
2315	}
2316	group->domain = new_domain;
2317	return result;
2318
2319err_revert:
2320	/*
2321	 * This is called in error unwind paths. A well behaved driver should
2322	 * always allow us to attach to a domain that was already attached.
2323	 */
2324	last_gdev = gdev;
2325	for_each_group_device(group, gdev) {
2326		/*
2327		 * A NULL domain can happen only for first probe, in which case
2328		 * we leave group->domain as NULL and let release clean
2329		 * everything up.
2330		 */
2331		if (group->domain)
2332			WARN_ON(__iommu_device_set_domain(
2333				group, gdev->dev, group->domain,
2334				IOMMU_SET_DOMAIN_MUST_SUCCEED));
2335		if (gdev == last_gdev)
2336			break;
2337	}
2338	return ret;
2339}
2340
2341void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2342{
2343	mutex_lock(&group->mutex);
2344	__iommu_group_set_core_domain(group);
2345	mutex_unlock(&group->mutex);
2346}
2347EXPORT_SYMBOL_GPL(iommu_detach_group);
2348
2349phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2350{
2351	if (domain->type == IOMMU_DOMAIN_IDENTITY)
2352		return iova;
2353
2354	if (domain->type == IOMMU_DOMAIN_BLOCKED)
2355		return 0;
2356
2357	return domain->ops->iova_to_phys(domain, iova);
2358}
2359EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2360
2361static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
2362			   phys_addr_t paddr, size_t size, size_t *count)
2363{
2364	unsigned int pgsize_idx, pgsize_idx_next;
2365	unsigned long pgsizes;
2366	size_t offset, pgsize, pgsize_next;
2367	unsigned long addr_merge = paddr | iova;
2368
2369	/* Page sizes supported by the hardware and small enough for @size */
2370	pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
2371
2372	/* Constrain the page sizes further based on the maximum alignment */
2373	if (likely(addr_merge))
2374		pgsizes &= GENMASK(__ffs(addr_merge), 0);
2375
2376	/* Make sure we have at least one suitable page size */
2377	BUG_ON(!pgsizes);
2378
2379	/* Pick the biggest page size remaining */
2380	pgsize_idx = __fls(pgsizes);
2381	pgsize = BIT(pgsize_idx);
2382	if (!count)
2383		return pgsize;
2384
2385	/* Find the next biggest supported page size, if it exists */
2386	pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
2387	if (!pgsizes)
2388		goto out_set_count;
2389
2390	pgsize_idx_next = __ffs(pgsizes);
2391	pgsize_next = BIT(pgsize_idx_next);
2392
2393	/*
2394	 * There's no point trying a bigger page size unless the virtual
2395	 * and physical addresses are similarly offset within the larger page.
2396	 */
2397	if ((iova ^ paddr) & (pgsize_next - 1))
2398		goto out_set_count;
2399
2400	/* Calculate the offset to the next page size alignment boundary */
2401	offset = pgsize_next - (addr_merge & (pgsize_next - 1));
2402
2403	/*
2404	 * If size is big enough to accommodate the larger page, reduce
2405	 * the number of smaller pages.
2406	 */
2407	if (offset + pgsize_next <= size)
2408		size = offset;
2409
2410out_set_count:
2411	*count = size >> pgsize_idx;
2412	return pgsize;
2413}
2414
2415static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2416		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2417{
2418	const struct iommu_domain_ops *ops = domain->ops;
2419	unsigned long orig_iova = iova;
2420	unsigned int min_pagesz;
2421	size_t orig_size = size;
2422	phys_addr_t orig_paddr = paddr;
2423	int ret = 0;
2424
2425	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2426		return -EINVAL;
2427
2428	if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL))
2429		return -ENODEV;
2430
2431	/* find out the minimum page size supported */
2432	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2433
2434	/*
2435	 * both the virtual address and the physical one, as well as
2436	 * the size of the mapping, must be aligned (at least) to the
2437	 * size of the smallest page supported by the hardware
2438	 */
2439	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2440		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2441		       iova, &paddr, size, min_pagesz);
2442		return -EINVAL;
2443	}
2444
2445	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2446
2447	while (size) {
2448		size_t pgsize, count, mapped = 0;
2449
2450		pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
2451
2452		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
2453			 iova, &paddr, pgsize, count);
2454		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
2455				     gfp, &mapped);
2456		/*
2457		 * Some pages may have been mapped, even if an error occurred,
2458		 * so we should account for those so they can be unmapped.
2459		 */
2460		size -= mapped;
2461
2462		if (ret)
2463			break;
2464
2465		iova += mapped;
2466		paddr += mapped;
2467	}
2468
2469	/* unroll mapping in case something went wrong */
2470	if (ret)
2471		iommu_unmap(domain, orig_iova, orig_size - size);
2472	else
2473		trace_map(orig_iova, orig_paddr, orig_size);
2474
2475	return ret;
2476}
2477
2478int iommu_map(struct iommu_domain *domain, unsigned long iova,
2479	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2480{
2481	const struct iommu_domain_ops *ops = domain->ops;
2482	int ret;
2483
2484	might_sleep_if(gfpflags_allow_blocking(gfp));
2485
2486	/* Discourage passing strange GFP flags */
2487	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
2488				__GFP_HIGHMEM)))
2489		return -EINVAL;
2490
2491	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2492	if (ret == 0 && ops->iotlb_sync_map) {
2493		ret = ops->iotlb_sync_map(domain, iova, size);
2494		if (ret)
2495			goto out_err;
2496	}
2497
2498	return ret;
2499
2500out_err:
2501	/* undo mappings already done */
2502	iommu_unmap(domain, iova, size);
2503
2504	return ret;
2505}
2506EXPORT_SYMBOL_GPL(iommu_map);
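/*
 * Example (editor's sketch): map a single 4KiB page at an arbitrary IOVA
 * and check the translation with iommu_iova_to_phys(). The function name
 * and the IOVA value are made up for the illustration; real users allocate
 * IOVA space themselves.
 */
static int example_map_one_page(struct iommu_domain *domain,
				phys_addr_t paddr)
{
	const unsigned long iova = 0x100000;
	int ret;

	ret = iommu_map(domain, iova, paddr, SZ_4K,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret)
		return ret;

	/* The translation installed above must now resolve back to @paddr */
	WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

	iommu_unmap(domain, iova, SZ_4K);
	return 0;
}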
2507
2508static size_t __iommu_unmap(struct iommu_domain *domain,
2509			    unsigned long iova, size_t size,
2510			    struct iommu_iotlb_gather *iotlb_gather)
2511{
2512	const struct iommu_domain_ops *ops = domain->ops;
2513	size_t unmapped_page, unmapped = 0;
2514	unsigned long orig_iova = iova;
2515	unsigned int min_pagesz;
2516
2517	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2518		return 0;
2519
2520	if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL))
2521		return 0;
2522
2523	/* find out the minimum page size supported */
2524	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2525
2526	/*
2527	 * The virtual address, as well as the size of the mapping, must be
2528	 * aligned (at least) to the size of the smallest page supported
2529	 * by the hardware
2530	 */
2531	if (!IS_ALIGNED(iova | size, min_pagesz)) {
2532		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2533		       iova, size, min_pagesz);
2534		return 0;
2535	}
2536
2537	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2538
2539	/*
2540	 * Keep iterating until we either unmap 'size' bytes (or more)
2541	 * or we hit an area that isn't mapped.
2542	 */
2543	while (unmapped < size) {
2544		size_t pgsize, count;
2545
2546		pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count);
2547		unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather);
2548		if (!unmapped_page)
2549			break;
2550
2551		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2552			 iova, unmapped_page);
2553
2554		iova += unmapped_page;
2555		unmapped += unmapped_page;
2556	}
2557
2558	trace_unmap(orig_iova, size, unmapped);
2559	return unmapped;
2560}
2561
2562/**
2563 * iommu_unmap() - Remove mappings from a range of IOVA
2564 * @domain: Domain to manipulate
2565 * @iova: IO virtual address to start
2566 * @size: Length of the range starting from @iova
2567 *
2568 * iommu_unmap() will remove a translation created by iommu_map(). It cannot
2569 * subdivide a mapping created by iommu_map(), so it should be called with IOVA
2570 * ranges that match what was passed to iommu_map(). The range can aggregate
2571 * contiguous iommu_map() calls so long as no individual range is split.
2572 *
2573 * Returns: Number of bytes of IOVA unmapped. iova + res will be the point at
2574 * which unmapping stopped.
2575 */
2576size_t iommu_unmap(struct iommu_domain *domain,
2577		   unsigned long iova, size_t size)
2578{
2579	struct iommu_iotlb_gather iotlb_gather;
2580	size_t ret;
2581
2582	iommu_iotlb_gather_init(&iotlb_gather);
2583	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2584	iommu_iotlb_sync(domain, &iotlb_gather);
2585
2586	return ret;
2587}
2588EXPORT_SYMBOL_GPL(iommu_unmap);
2589
2590size_t iommu_unmap_fast(struct iommu_domain *domain,
2591			unsigned long iova, size_t size,
2592			struct iommu_iotlb_gather *iotlb_gather)
2593{
2594	return __iommu_unmap(domain, iova, size, iotlb_gather);
2595}
2596EXPORT_SYMBOL_GPL(iommu_unmap_fast);
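/*
 * Example (editor's sketch): batch two adjacent unmaps through one gather
 * structure and issue a single IOTLB flush at the end, which is what
 * iommu_unmap() does internally for a single range. The function name is
 * hypothetical.
 */
static void example_unmap_batched(struct iommu_domain *domain,
				  unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	iommu_unmap_fast(domain, iova, size, &gather);
	iommu_unmap_fast(domain, iova + size, size, &gather);
	/* One flush covers everything accumulated in the gather */
	iommu_iotlb_sync(domain, &gather);
}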
2597
2598ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2599		     struct scatterlist *sg, unsigned int nents, int prot,
2600		     gfp_t gfp)
2601{
2602	const struct iommu_domain_ops *ops = domain->ops;
2603	size_t len = 0, mapped = 0;
2604	phys_addr_t start;
2605	unsigned int i = 0;
2606	int ret;
2607
2608	might_sleep_if(gfpflags_allow_blocking(gfp));
2609
2610	/* Discourage passing strange GFP flags */
2611	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
2612				__GFP_HIGHMEM)))
2613		return -EINVAL;
2614
2615	while (i <= nents) {
2616		phys_addr_t s_phys = sg_phys(sg);
2617
2618		if (len && s_phys != start + len) {
2619			ret = __iommu_map(domain, iova + mapped, start,
2620					len, prot, gfp);
2621
2622			if (ret)
2623				goto out_err;
2624
2625			mapped += len;
2626			len = 0;
2627		}
2628
2629		if (sg_dma_is_bus_address(sg))
2630			goto next;
2631
2632		if (len) {
2633			len += sg->length;
2634		} else {
2635			len = sg->length;
2636			start = s_phys;
2637		}
2638
2639next:
2640		if (++i < nents)
2641			sg = sg_next(sg);
2642	}
2643
2644	if (ops->iotlb_sync_map) {
2645		ret = ops->iotlb_sync_map(domain, iova, mapped);
2646		if (ret)
2647			goto out_err;
2648	}
2649	return mapped;
2650
2651out_err:
2652	/* undo mappings already done */
2653	iommu_unmap(domain, iova, mapped);
2654
2655	return ret;
2656}
2657EXPORT_SYMBOL_GPL(iommu_map_sg);
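/*
 * Example (editor's sketch): map an already-built sg_table contiguously in
 * IOVA space. iommu_map_sg() returns the number of bytes mapped or a
 * negative errno. The function name is hypothetical.
 */
static int example_map_sgtable(struct iommu_domain *domain,
			       unsigned long iova, struct sg_table *sgt)
{
	ssize_t mapped;

	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
			      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (mapped < 0)
		return mapped;

	/* ... hand the [iova, iova + mapped) range to the device ... */
	iommu_unmap(domain, iova, mapped);
	return 0;
}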
2658
2659/**
2660 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2661 * @domain: the iommu domain where the fault has happened
2662 * @dev: the device where the fault has happened
2663 * @iova: the faulting address
2664 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2665 *
2666 * This function should be called by the low-level IOMMU implementations
2667 * whenever IOMMU faults happen, to allow high-level users that are
2668 * interested in such events to know about them.
2669 *
2670 * This event may be useful for several possible use cases:
2671 * - mere logging of the event
2672 * - dynamic TLB/PTE loading
2673 * - restarting the faulting device, if required
2674 *
2675 * Returns 0 on success and an appropriate error code otherwise (if dynamic
2676 * PTE/TLB loading will one day be supported, implementations will be able
2677 * to tell whether it succeeded or not according to this return value).
2678 *
2679 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2680 * (though fault handlers can also return -ENOSYS, in case they want to
2681 * elicit the default behavior of the IOMMU drivers).
2682 */
2683int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2684		       unsigned long iova, int flags)
2685{
2686	int ret = -ENOSYS;
2687
2688	/*
2689	 * if upper layers showed interest and installed a fault handler,
2690	 * invoke it.
2691	 */
2692	if (domain->handler)
2693		ret = domain->handler(domain, dev, iova, flags,
2694						domain->handler_token);
2695
2696	trace_io_page_fault(dev, iova, flags);
2697	return ret;
2698}
2699EXPORT_SYMBOL_GPL(report_iommu_fault);
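/*
 * Example (editor's sketch): a fault handler with the shape consumed by
 * report_iommu_fault() above; it would be installed on an unmanaged domain
 * with iommu_set_fault_handler(). Returning -ENOSYS keeps the IOMMU
 * driver's default fault behaviour. The function name is hypothetical.
 */
static int example_fault_handler(struct iommu_domain *domain,
				 struct device *dev, unsigned long iova,
				 int flags, void *token)
{
	dev_err(dev, "unexpected %s fault at IOVA %#lx\n",
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
	return -ENOSYS; /* fall back to the driver's default handling */
}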
2700
2701static int __init iommu_init(void)
2702{
2703	iommu_group_kset = kset_create_and_add("iommu_groups",
2704					       NULL, kernel_kobj);
2705	BUG_ON(!iommu_group_kset);
2706
2707	iommu_debugfs_setup();
2708
2709	return 0;
2710}
2711core_initcall(iommu_init);
2712
2713int iommu_set_pgtable_quirks(struct iommu_domain *domain,
2714		unsigned long quirk)
2715{
2716	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2717		return -EINVAL;
2718	if (!domain->ops->set_pgtable_quirks)
2719		return -EINVAL;
2720	return domain->ops->set_pgtable_quirks(domain, quirk);
2721}
2722EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
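/*
 * Example (editor's sketch): opting an unmanaged domain in to an io-pgtable
 * quirk before any mappings are created. IO_PGTABLE_QUIRK_ARM_OUTER_WBWA
 * (from <linux/io-pgtable.h>) is the quirk the MSM GPU driver requests;
 * drivers that do not support a quirk return -EINVAL. The function name is
 * hypothetical.
 */
static void example_enable_wbwa(struct iommu_domain *domain)
{
	if (iommu_set_pgtable_quirks(domain, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		pr_warn("outer-WBWA pagetable walks not supported\n");
}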
2723
2724/**
2725 * iommu_get_resv_regions - get reserved regions
2726 * @dev: device for which to get reserved regions
2727 * @list: reserved region list for device
2728 *
2729 * This returns a list of reserved IOVA regions specific to this device.
2730 * A domain user should not map IOVA in these ranges.
2731 */
2732void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2733{
2734	const struct iommu_ops *ops = dev_iommu_ops(dev);
2735
2736	if (ops->get_resv_regions)
2737		ops->get_resv_regions(dev, list);
2738}
2739EXPORT_SYMBOL_GPL(iommu_get_resv_regions);
2740
2741/**
2742 * iommu_put_resv_regions - release reserved regions
2743 * @dev: device for which to free reserved regions
2744 * @list: reserved region list for device
2745 *
2746 * This releases a reserved region list acquired by iommu_get_resv_regions().
2747 */
2748void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2749{
2750	struct iommu_resv_region *entry, *next;
2751
2752	list_for_each_entry_safe(entry, next, list, list) {
2753		if (entry->free)
2754			entry->free(dev, entry);
2755		else
2756			kfree(entry);
2757	}
2758}
2759EXPORT_SYMBOL(iommu_put_resv_regions);
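/*
 * Example (editor's sketch): walk the reserved regions of a device so that
 * a domain user can carve them out of its IOVA allocator, then release the
 * list again. The function name is hypothetical.
 */
static void example_log_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list)
		dev_info(dev, "reserved region at %pa, length %zu, type %d\n",
			 &region->start, region->length, region->type);
	iommu_put_resv_regions(dev, &resv_regions);
}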
2760
2761struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2762						  size_t length, int prot,
2763						  enum iommu_resv_type type,
2764						  gfp_t gfp)
2765{
2766	struct iommu_resv_region *region;
2767
2768	region = kzalloc(sizeof(*region), gfp);
2769	if (!region)
2770		return NULL;
2771
2772	INIT_LIST_HEAD(&region->list);
2773	region->start = start;
2774	region->length = length;
2775	region->prot = prot;
2776	region->type = type;
2777	return region;
2778}
2779EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
2780
2781void iommu_set_default_passthrough(bool cmd_line)
2782{
2783	if (cmd_line)
2784		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2785	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2786}
2787
2788void iommu_set_default_translated(bool cmd_line)
2789{
2790	if (cmd_line)
2791		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2792	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2793}
2794
2795bool iommu_default_passthrough(void)
2796{
2797	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2798}
2799EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2800
2801const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
2802{
2803	const struct iommu_ops *ops = NULL;
2804	struct iommu_device *iommu;
2805
2806	spin_lock(&iommu_device_lock);
2807	list_for_each_entry(iommu, &iommu_device_list, list)
2808		if (iommu->fwnode == fwnode) {
2809			ops = iommu->ops;
2810			break;
2811		}
2812	spin_unlock(&iommu_device_lock);
2813	return ops;
2814}
2815
2816int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
2817{
2818	const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
2819	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2820
2821	if (!ops)
2822		return -EPROBE_DEFER;
2823
2824	if (fwspec)
2825		return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
2826
2827	if (!dev_iommu_get(dev))
2828		return -ENOMEM;
2829
2830	/* Preallocate for the overwhelmingly common case of 1 ID */
2831	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2832	if (!fwspec)
2833		return -ENOMEM;
2834
2835	fwnode_handle_get(iommu_fwnode);
2836	fwspec->iommu_fwnode = iommu_fwnode;
2837	dev_iommu_fwspec_set(dev, fwspec);
2838	return 0;
2839}
2840EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2841
2842void iommu_fwspec_free(struct device *dev)
2843{
2844	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2845
2846	if (fwspec) {
2847		fwnode_handle_put(fwspec->iommu_fwnode);
2848		kfree(fwspec);
2849		dev_iommu_fwspec_set(dev, NULL);
2850	}
2851}
2852EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2853
2854int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
2855{
2856	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2857	int i, new_num;
2858
2859	if (!fwspec)
2860		return -EINVAL;
2861
2862	new_num = fwspec->num_ids + num_ids;
2863	if (new_num > 1) {
2864		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2865				  GFP_KERNEL);
2866		if (!fwspec)
2867			return -ENOMEM;
2868
2869		dev_iommu_fwspec_set(dev, fwspec);
2870	}
2871
2872	for (i = 0; i < num_ids; i++)
2873		fwspec->ids[fwspec->num_ids + i] = ids[i];
2874
2875	fwspec->num_ids = new_num;
2876	return 0;
2877}
2878EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
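/*
 * Example (editor's sketch): the pattern firmware-parsing glue (OF/ACPI)
 * follows to record a device's IOMMU instance plus one stream ID. The
 * function name is hypothetical; @iommu_fwnode and @sid are assumed to come
 * from the firmware tables.
 */
static int example_fwspec_setup(struct device *dev,
				struct fwnode_handle *iommu_fwnode, u32 sid)
{
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode);
	if (ret)
		return ret;

	return iommu_fwspec_add_ids(dev, &sid, 1);
}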
2879
2880/*
2881 * Per device IOMMU features.
2882 */
2883int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2884{
2885	if (dev_has_iommu(dev)) {
2886		const struct iommu_ops *ops = dev_iommu_ops(dev);
2887
2888		if (ops->dev_enable_feat)
2889			return ops->dev_enable_feat(dev, feat);
2890	}
2891
2892	return -ENODEV;
2893}
2894EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2895
2896/*
2897 * The device drivers should do the necessary cleanups before calling this.
2898 */
2899int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2900{
2901	if (dev_has_iommu(dev)) {
2902		const struct iommu_ops *ops = dev_iommu_ops(dev);
2903
2904		if (ops->dev_disable_feat)
2905			return ops->dev_disable_feat(dev, feat);
2906	}
2907
2908	return -EBUSY;
2909}
2910EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2911
2912/**
2913 * iommu_setup_default_domain - Set the default_domain for the group
2914 * @group: Group to change
2915 * @target_type: Domain type to set as the default_domain
2916 *
2917 * Allocate a default domain and set it as the current domain on the group. If
2918 * the group already has a default domain it will be changed to the target_type.
2919 * When target_type is 0 the default domain is selected based on driver and
2920 * system preferences.
2921 */
2922static int iommu_setup_default_domain(struct iommu_group *group,
2923				      int target_type)
2924{
2925	struct iommu_domain *old_dom = group->default_domain;
2926	struct group_device *gdev;
2927	struct iommu_domain *dom;
2928	bool direct_failed;
2929	int req_type;
2930	int ret;
2931
2932	lockdep_assert_held(&group->mutex);
2933
2934	req_type = iommu_get_default_domain_type(group, target_type);
2935	if (req_type < 0)
2936		return -EINVAL;
2937
2938	dom = iommu_group_alloc_default_domain(group, req_type);
2939	if (IS_ERR(dom))
2940		return PTR_ERR(dom);
2941
2942	if (group->default_domain == dom)
2943		return 0;
2944
2945	if (iommu_is_dma_domain(dom)) {
2946		ret = iommu_get_dma_cookie(dom);
2947		if (ret) {
2948			iommu_domain_free(dom);
2949			return ret;
2950		}
2951	}
2952
2953	/*
2954	 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be
2955	 * mapped before their device is attached, in order to guarantee
2956	 * continuity with any FW activity
2957	 */
2958	direct_failed = false;
2959	for_each_group_device(group, gdev) {
2960		if (iommu_create_device_direct_mappings(dom, gdev->dev)) {
2961			direct_failed = true;
2962			dev_warn_once(
2963				gdev->dev->iommu->iommu_dev->dev,
2964				"IOMMU driver was not able to establish FW requested direct mapping.");
2965		}
2966	}
2967
2968	/* We must set default_domain early for __iommu_device_set_domain */
2969	group->default_domain = dom;
2970	if (!group->domain) {
2971		/*
2972		 * Drivers are not allowed to fail the first domain attach.
2973		 * The only way to recover from this is to fail attaching the
2974		 * iommu driver and call ops->release_device. Put the domain
2975		 * in group->default_domain so it is freed after.
2976		 */
2977		ret = __iommu_group_set_domain_internal(
2978			group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
2979		if (WARN_ON(ret))
2980			goto out_free_old;
2981	} else {
2982		ret = __iommu_group_set_domain(group, dom);
2983		if (ret)
2984			goto err_restore_def_domain;
2985	}
2986
2987	/*
2988	 * Drivers are supposed to allow mappings to be installed in a domain
2989	 * before device attachment, but some don't. Hack around this defect by
2990	 * trying again after attaching. If this happens it means the device
2991	 * will not continuously have the IOMMU_RESV_DIRECT map.
2992	 */
2993	if (direct_failed) {
2994		for_each_group_device(group, gdev) {
2995			ret = iommu_create_device_direct_mappings(dom, gdev->dev);
2996			if (ret)
2997				goto err_restore_domain;
2998		}
2999	}
3000
3001out_free_old:
3002	if (old_dom)
3003		iommu_domain_free(old_dom);
3004	return ret;
3005
3006err_restore_domain:
3007	if (old_dom)
3008		__iommu_group_set_domain_internal(
3009			group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
3010err_restore_def_domain:
3011	if (old_dom) {
3012		iommu_domain_free(dom);
3013		group->default_domain = old_dom;
3014	}
3015	return ret;
3016}
3017
3018/*
3019 * Changing the default domain through sysfs requires the users to unbind the
3020 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
3021 * transition. Return failure if this isn't met.
3022 *
3023 * We need to consider the race between this and the device release path.
3024 * group->mutex is used here to guarantee that the device release path
3025 * will not be entered at the same time.
3026 */
3027static ssize_t iommu_group_store_type(struct iommu_group *group,
3028				      const char *buf, size_t count)
3029{
3030	struct group_device *gdev;
3031	int ret, req_type;
3032
3033	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3034		return -EACCES;
3035
3036	if (WARN_ON(!group) || !group->default_domain)
3037		return -EINVAL;
3038
3039	if (sysfs_streq(buf, "identity"))
3040		req_type = IOMMU_DOMAIN_IDENTITY;
3041	else if (sysfs_streq(buf, "DMA"))
3042		req_type = IOMMU_DOMAIN_DMA;
3043	else if (sysfs_streq(buf, "DMA-FQ"))
3044		req_type = IOMMU_DOMAIN_DMA_FQ;
3045	else if (sysfs_streq(buf, "auto"))
3046		req_type = 0;
3047	else
3048		return -EINVAL;
3049
3050	mutex_lock(&group->mutex);
3051	/* We can bring up a flush queue without tearing down the domain. */
3052	if (req_type == IOMMU_DOMAIN_DMA_FQ &&
3053	    group->default_domain->type == IOMMU_DOMAIN_DMA) {
3054		ret = iommu_dma_init_fq(group->default_domain);
3055		if (ret)
3056			goto out_unlock;
3057
3058		group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
3059		ret = count;
3060		goto out_unlock;
3061	}
3062
3063	/* Otherwise, ensure that a device exists and no driver is bound. */
3064	if (list_empty(&group->devices) || group->owner_cnt) {
3065		ret = -EPERM;
3066		goto out_unlock;
3067	}
3068
3069	ret = iommu_setup_default_domain(group, req_type);
3070	if (ret)
3071		goto out_unlock;
3072
3073	/* Make sure dma_ops is appropriately set */
3074	for_each_group_device(group, gdev)
3075		iommu_setup_dma_ops(gdev->dev);
3076
3077out_unlock:
3078	mutex_unlock(&group->mutex);
3079	return ret ?: count;
3080}
3081
3082/**
3083 * iommu_device_use_default_domain() - Device driver wants to handle device
3084 *                                     DMA through the kernel DMA API.
3085 * @dev: The device.
3086 *
3087 * The device driver about to bind @dev wants to do DMA through the kernel
3088 * DMA API. Return 0 if it is allowed, otherwise an error.
3089 */
3090int iommu_device_use_default_domain(struct device *dev)
3091{
3092	/* Caller is the driver core during the pre-probe path */
3093	struct iommu_group *group = dev->iommu_group;
3094	int ret = 0;
3095
3096	if (!group)
3097		return 0;
3098
3099	mutex_lock(&group->mutex);
3100	if (group->owner_cnt) {
3101		if (group->domain != group->default_domain || group->owner ||
3102		    !xa_empty(&group->pasid_array)) {
3103			ret = -EBUSY;
3104			goto unlock_out;
3105		}
3106	}
3107
3108	group->owner_cnt++;
3109
3110unlock_out:
3111	mutex_unlock(&group->mutex);
3112	return ret;
3113}
3114
3115/**
3116 * iommu_device_unuse_default_domain() - Device driver stops handling device
3117 *                                       DMA through the kernel DMA API.
3118 * @dev: The device.
3119 *
3120 * The device driver doesn't want to do DMA through kernel DMA API anymore.
3121 * It must be called after iommu_device_use_default_domain().
3122 */
3123void iommu_device_unuse_default_domain(struct device *dev)
3124{
3125	/* Caller is the driver core during the post-probe path */
3126	struct iommu_group *group = dev->iommu_group;
3127
3128	if (!group)
3129		return;
3130
3131	mutex_lock(&group->mutex);
3132	if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
3133		group->owner_cnt--;
3134
3135	mutex_unlock(&group->mutex);
3136}
3137
3138static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
3139{
3140	struct device *dev = iommu_group_first_dev(group);
3141	const struct iommu_ops *ops = dev_iommu_ops(dev);
3142	struct iommu_domain *domain;
3143
3144	if (group->blocking_domain)
3145		return 0;
3146
3147	if (ops->blocked_domain) {
3148		group->blocking_domain = ops->blocked_domain;
3149		return 0;
3150	}
3151
3152	/*
3153	 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED create an
3154	 * empty PAGING domain instead.
3155	 */
3156	domain = iommu_paging_domain_alloc(dev);
3157	if (IS_ERR(domain))
3158		return PTR_ERR(domain);
3159	group->blocking_domain = domain;
3160	return 0;
3161}
3162
3163static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner)
3164{
3165	int ret;
3166
3167	if ((group->domain && group->domain != group->default_domain) ||
3168	    !xa_empty(&group->pasid_array))
3169		return -EBUSY;
3170
3171	ret = __iommu_group_alloc_blocking_domain(group);
3172	if (ret)
3173		return ret;
3174	ret = __iommu_group_set_domain(group, group->blocking_domain);
3175	if (ret)
3176		return ret;
3177
3178	group->owner = owner;
3179	group->owner_cnt++;
3180	return 0;
3181}
3182
3183/**
3184 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3185 * @group: The group.
3186 * @owner: Caller specified pointer. Used for exclusive ownership.
3187 *
3188 * This is to support backward compatibility for vfio, which manages DMA
3189 * ownership at the iommu_group level. New invocations of this interface should be
3190 * prohibited. Only a single owner may exist for a group.
3191 */
3192int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
3193{
3194	int ret = 0;
3195
3196	if (WARN_ON(!owner))
3197		return -EINVAL;
3198
3199	mutex_lock(&group->mutex);
3200	if (group->owner_cnt) {
3201		ret = -EPERM;
3202		goto unlock_out;
3203	}
3204
3205	ret = __iommu_take_dma_ownership(group, owner);
3206unlock_out:
3207	mutex_unlock(&group->mutex);
3208
3209	return ret;
3210}
3211EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
3212
3213/**
3214 * iommu_device_claim_dma_owner() - Set DMA ownership of a device
3215 * @dev: The device.
3216 * @owner: Caller specified pointer. Used for exclusive ownership.
3217 *
3218 * Claim the DMA ownership of a device. Multiple devices in the same group may
3219 * concurrently claim ownership if they present the same owner value. Returns 0
3220 * on success and error code on failure.
3221 */
3222int iommu_device_claim_dma_owner(struct device *dev, void *owner)
3223{
3224	/* Caller must be a probed driver on dev */
3225	struct iommu_group *group = dev->iommu_group;
3226	int ret = 0;
3227
3228	if (WARN_ON(!owner))
3229		return -EINVAL;
3230
3231	if (!group)
3232		return -ENODEV;
3233
3234	mutex_lock(&group->mutex);
3235	if (group->owner_cnt) {
3236		if (group->owner != owner) {
3237			ret = -EPERM;
3238			goto unlock_out;
3239		}
3240		group->owner_cnt++;
3241		goto unlock_out;
3242	}
3243
3244	ret = __iommu_take_dma_ownership(group, owner);
3245unlock_out:
3246	mutex_unlock(&group->mutex);
3247	return ret;
3248}
3249EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);
3250
3251static void __iommu_release_dma_ownership(struct iommu_group *group)
3252{
3253	if (WARN_ON(!group->owner_cnt || !group->owner ||
3254		    !xa_empty(&group->pasid_array)))
3255		return;
3256
3257	group->owner_cnt = 0;
3258	group->owner = NULL;
3259	__iommu_group_set_domain_nofail(group, group->default_domain);
3260}
3261
3262/**
3263 * iommu_group_release_dma_owner() - Release DMA ownership of a group
3264 * @group: The group
3265 *
3266 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
3267 */
3268void iommu_group_release_dma_owner(struct iommu_group *group)
3269{
3270	mutex_lock(&group->mutex);
3271	__iommu_release_dma_ownership(group);
3272	mutex_unlock(&group->mutex);
3273}
3274EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
3275
3276/**
3277 * iommu_device_release_dma_owner() - Release DMA ownership of a device
3278 * @dev: The device.
3279 *
3280 * Release the DMA ownership claimed by iommu_device_claim_dma_owner().
3281 */
3282void iommu_device_release_dma_owner(struct device *dev)
3283{
3284	/* Caller must be a probed driver on dev */
3285	struct iommu_group *group = dev->iommu_group;
3286
3287	mutex_lock(&group->mutex);
3288	if (group->owner_cnt > 1)
3289		group->owner_cnt--;
3290	else
3291		__iommu_release_dma_ownership(group);
3292	mutex_unlock(&group->mutex);
3293}
3294EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner);
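/*
 * Example (editor's sketch): exclusive user-DMA style ownership of a single
 * device, bracketing the window in which the caller may attach its own
 * unmanaged domain. Any unique pointer serves as the @owner cookie. The
 * function name is hypothetical.
 */
static int example_exclusive_dma(struct device *dev, void *cookie)
{
	int ret;

	ret = iommu_device_claim_dma_owner(dev, cookie);
	if (ret)
		return ret;

	/* ... attach a caller-owned domain and run DMA ... */
	iommu_device_release_dma_owner(dev);
	return 0;
}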
3295
3296/**
3297 * iommu_group_dma_owner_claimed() - Query group dma ownership status
3298 * @group: The group.
3299 *
3300 * This provides status query on a given group. It is racy and only for
3301 * non-binding status reporting.
3302 */
3303bool iommu_group_dma_owner_claimed(struct iommu_group *group)
3304{
3305	unsigned int user;
3306
3307	mutex_lock(&group->mutex);
3308	user = group->owner_cnt;
3309	mutex_unlock(&group->mutex);
3310
3311	return user;
3312}
3313EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
3314
3315static int __iommu_set_group_pasid(struct iommu_domain *domain,
3316				   struct iommu_group *group, ioasid_t pasid)
3317{
3318	struct group_device *device, *last_gdev;
3319	int ret;
3320
3321	for_each_group_device(group, device) {
3322		ret = domain->ops->set_dev_pasid(domain, device->dev,
3323						 pasid, NULL);
3324		if (ret)
3325			goto err_revert;
3326	}
3327
3328	return 0;
3329
3330err_revert:
3331	last_gdev = device;
3332	for_each_group_device(group, device) {
3333		const struct iommu_ops *ops = dev_iommu_ops(device->dev);
3334
3335		if (device == last_gdev)
3336			break;
3337		ops->remove_dev_pasid(device->dev, pasid, domain);
3338	}
3339	return ret;
3340}
3341
3342static void __iommu_remove_group_pasid(struct iommu_group *group,
3343				       ioasid_t pasid,
3344				       struct iommu_domain *domain)
3345{
3346	struct group_device *device;
3347	const struct iommu_ops *ops;
3348
3349	for_each_group_device(group, device) {
3350		ops = dev_iommu_ops(device->dev);
3351		ops->remove_dev_pasid(device->dev, pasid, domain);
3352	}
3353}
3354
3355/*
3356 * iommu_attach_device_pasid() - Attach a domain to pasid of device
3357 * @domain: the iommu domain.
3358 * @dev: the attached device.
3359 * @pasid: the pasid of the device.
3360 * @handle: the attach handle.
3361 *
3362 * Return: 0 on success, or an error.
3363 */
3364int iommu_attach_device_pasid(struct iommu_domain *domain,
3365			      struct device *dev, ioasid_t pasid,
3366			      struct iommu_attach_handle *handle)
3367{
3368	/* Caller must be a probed driver on dev */
3369	struct iommu_group *group = dev->iommu_group;
3370	struct group_device *device;
3371	int ret;
3372
3373	if (!domain->ops->set_dev_pasid)
3374		return -EOPNOTSUPP;
3375
3376	if (!group)
3377		return -ENODEV;
3378
3379	if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
3380	    pasid == IOMMU_NO_PASID)
3381		return -EINVAL;
3382
3383	mutex_lock(&group->mutex);
3384	for_each_group_device(group, device) {
3385		if (pasid >= device->dev->iommu->max_pasids) {
3386			ret = -EINVAL;
3387			goto out_unlock;
3388		}
3389	}
3390
3391	if (handle)
3392		handle->domain = domain;
3393
3394	ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL);
3395	if (ret)
3396		goto out_unlock;
3397
3398	ret = __iommu_set_group_pasid(domain, group, pasid);
3399	if (ret)
3400		xa_erase(&group->pasid_array, pasid);
3401out_unlock:
3402	mutex_unlock(&group->mutex);
3403	return ret;
3404}
3405EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
3406
3407/*
3408 * iommu_detach_device_pasid() - Detach the domain from pasid of device
3409 * @domain: the iommu domain.
3410 * @dev: the attached device.
3411 * @pasid: the pasid of the device.
3412 *
3413 * The @domain must have been attached to @pasid of the @dev with
3414 * iommu_attach_device_pasid().
3415 */
3416void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
3417			       ioasid_t pasid)
3418{
3419	/* Caller must be a probed driver on dev */
3420	struct iommu_group *group = dev->iommu_group;
3421
3422	mutex_lock(&group->mutex);
3423	__iommu_remove_group_pasid(group, pasid, domain);
3424	xa_erase(&group->pasid_array, pasid);
3425	mutex_unlock(&group->mutex);
3426}
3427EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
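/*
 * Example (editor's sketch): attach a domain to one PASID of a device and
 * detach it again. The function name is hypothetical; real callers obtain
 * the PASID from an allocator such as iommu_alloc_global_pasid() below.
 */
static int example_pasid_attach(struct iommu_domain *domain,
				struct device *dev, ioasid_t pasid)
{
	int ret;

	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
	if (ret)
		return ret;

	/* ... issue PASID-tagged DMA through the domain ... */
	iommu_detach_device_pasid(domain, dev, pasid);
	return 0;
}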
3428
3429ioasid_t iommu_alloc_global_pasid(struct device *dev)
3430{
3431	int ret;
3432
3433	/* max_pasids == 0 means that the device does not support PASID */
3434	if (!dev->iommu->max_pasids)
3435		return IOMMU_PASID_INVALID;
3436
3437	/*
3438 * max_pasids is set up by the vendor driver based on the number of PASID
3439 * bits supported, but the IDA allocation is inclusive.
3440	 */
3441	ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID,
3442			      dev->iommu->max_pasids - 1, GFP_KERNEL);
3443	return ret < 0 ? IOMMU_PASID_INVALID : ret;
3444}
3445EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);
3446
3447void iommu_free_global_pasid(ioasid_t pasid)
3448{
3449	if (WARN_ON(pasid == IOMMU_PASID_INVALID))
3450		return;
3451
3452	ida_free(&iommu_global_pasid_ida, pasid);
3453}
3454EXPORT_SYMBOL_GPL(iommu_free_global_pasid);
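/*
 * Example (editor's sketch): allocate a PASID from the global space for a
 * device and free it afterwards. IOMMU_PASID_INVALID signals either lack of
 * PASID support or exhaustion. The function name is hypothetical.
 */
static int example_global_pasid(struct device *dev)
{
	ioasid_t pasid = iommu_alloc_global_pasid(dev);

	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	/* ... attach a domain to the PASID and use it ... */
	iommu_free_global_pasid(pasid);
	return 0;
}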
3455
3456/**
3457 * iommu_attach_handle_get - Return the attach handle
3458 * @group: the iommu group that domain was attached to
3459 * @pasid: the pasid within the group
3460 * @type: matched domain type, 0 for any match
3461 *
3462 * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch.
3463 *
3464 * Return the attach handle to the caller. The life cycle of an iommu attach
3465 * handle is from the time when the domain is attached to the time when the
3466 * domain is detached. Callers are required to synchronize the call of
3467 * iommu_attach_handle_get() with domain attachment and detachment. The attach
3468 * handle can only be used during its life cycle.
3469 */
3470struct iommu_attach_handle *
3471iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type)
3472{
3473	struct iommu_attach_handle *handle;
3474
3475	xa_lock(&group->pasid_array);
3476	handle = xa_load(&group->pasid_array, pasid);
3477	if (!handle)
3478		handle = ERR_PTR(-ENOENT);
3479	else if (type && handle->domain->type != type)
3480		handle = ERR_PTR(-EBUSY);
3481	xa_unlock(&group->pasid_array);
3482
3483	return handle;
3484}
3485EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, "IOMMUFD_INTERNAL");
3486
3487/**
3488 * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group
3489 * @domain: IOMMU domain to attach
3490 * @group: IOMMU group that will be attached
3491 * @handle: attach handle
3492 *
3493 * Returns 0 on success and error code on failure.
3494 *
3495 * This is a variant of iommu_attach_group(). It allows the caller to provide
3496 * an attach handle and use it when the domain is attached. This is currently
3497 * used by IOMMUFD to deliver I/O page faults.
3498 */
3499int iommu_attach_group_handle(struct iommu_domain *domain,
3500			      struct iommu_group *group,
3501			      struct iommu_attach_handle *handle)
3502{
3503	int ret;
3504
3505	if (handle)
3506		handle->domain = domain;
3507
3508	mutex_lock(&group->mutex);
3509	ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
3510	if (ret)
3511		goto err_unlock;
3512
3513	ret = __iommu_attach_group(domain, group);
3514	if (ret)
3515		goto err_erase;
3516	mutex_unlock(&group->mutex);
3517
3518	return 0;
3519err_erase:
3520	xa_erase(&group->pasid_array, IOMMU_NO_PASID);
3521err_unlock:
3522	mutex_unlock(&group->mutex);
3523	return ret;
3524}
3525EXPORT_SYMBOL_NS_GPL(iommu_attach_group_handle, "IOMMUFD_INTERNAL");
3526
3527/**
3528 * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group
3529 * @domain: IOMMU domain to attach
3530 * @group: IOMMU group that will be attached
3531 *
3532 * Detach the specified IOMMU domain from the specified IOMMU group.
3533 * It must be used in conjunction with iommu_attach_group_handle().
3534 */
3535void iommu_detach_group_handle(struct iommu_domain *domain,
3536			       struct iommu_group *group)
3537{
3538	mutex_lock(&group->mutex);
3539	__iommu_group_set_core_domain(group);
3540	xa_erase(&group->pasid_array, IOMMU_NO_PASID);
3541	mutex_unlock(&group->mutex);
3542}
3543EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL");
3544
3545/**
3546 * iommu_replace_group_handle - replace the domain that a group is attached to
3547 * @group: IOMMU group that will be attached to the new domain
3548 * @new_domain: new IOMMU domain to replace with
3549 * @handle: attach handle
3550 *
3551 * This is a variant of iommu_group_replace_domain(). It allows the caller to
3552 * provide an attach handle for the new domain and use it when the domain is
3553 * attached.
3554 */
3555int iommu_replace_group_handle(struct iommu_group *group,
3556			       struct iommu_domain *new_domain,
3557			       struct iommu_attach_handle *handle)
3558{
3559	void *curr;
3560	int ret;
3561
3562	if (!new_domain)
3563		return -EINVAL;
3564
3565	mutex_lock(&group->mutex);
3566	if (handle) {
3567		ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
3568		if (ret)
3569			goto err_unlock;
3570		handle->domain = new_domain;
3571	}
3572
3573	ret = __iommu_group_set_domain(group, new_domain);
3574	if (ret)
3575		goto err_release;
3576
3577	curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
3578	WARN_ON(xa_is_err(curr));
3579
3580	mutex_unlock(&group->mutex);
3581
3582	return 0;
3583err_release:
3584	xa_release(&group->pasid_array, IOMMU_NO_PASID);
3585err_unlock:
3586	mutex_unlock(&group->mutex);
3587	return ret;
3588}
3589EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL");