   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2016, Semihalf
   4 *	Author: Tomasz Nowicki <tn@semihalf.com>
   5 *
   6 * This file implements early detection/parsing of I/O mapping
   7 * reported to OS through firmware via I/O Remapping Table (IORT)
   8 * IORT document number: ARM DEN 0049A
   9 */
  10
  11#define pr_fmt(fmt)	"ACPI: IORT: " fmt
  12
  13#include <linux/acpi_iort.h>
  14#include <linux/bitfield.h>
  15#include <linux/iommu.h>
  16#include <linux/kernel.h>
  17#include <linux/list.h>
  18#include <linux/pci.h>
  19#include <linux/platform_device.h>
  20#include <linux/slab.h>
  21#include <linux/dma-map-ops.h>
  22#include "init.h"
  23
  24#define IORT_TYPE_MASK(type)	(1 << (type))
  25#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
  26#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
  27				(1 << ACPI_IORT_NODE_SMMU_V3))
  28
  29struct iort_its_msi_chip {
  30	struct list_head	list;
  31	struct fwnode_handle	*fw_node;
  32	phys_addr_t		base_addr;
  33	u32			translation_id;
  34};
  35
  36struct iort_fwnode {
  37	struct list_head list;
  38	struct acpi_iort_node *iort_node;
  39	struct fwnode_handle *fwnode;
  40};
  41static LIST_HEAD(iort_fwnode_list);
  42static DEFINE_SPINLOCK(iort_fwnode_lock);
  43
  44/**
  45 * iort_set_fwnode() - Create iort_fwnode and use it to register
  46 *		       iommu data in the iort_fwnode_list
  47 *
  48 * @iort_node: IORT table node associated with the IOMMU
  49 * @fwnode: fwnode associated with the IORT node
  50 *
  51 * Returns: 0 on success
  52 *          <0 on failure
  53 */
  54static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
  55				  struct fwnode_handle *fwnode)
  56{
  57	struct iort_fwnode *np;
  58
  59	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
  60
  61	if (WARN_ON(!np))
  62		return -ENOMEM;
  63
  64	INIT_LIST_HEAD(&np->list);
  65	np->iort_node = iort_node;
  66	np->fwnode = fwnode;
  67
  68	spin_lock(&iort_fwnode_lock);
  69	list_add_tail(&np->list, &iort_fwnode_list);
  70	spin_unlock(&iort_fwnode_lock);
  71
  72	return 0;
  73}
  74
  75/**
  76 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
  77 *
  78 * @node: IORT table node to be looked-up
  79 *
  80 * Returns: fwnode_handle pointer on success, NULL on failure
  81 */
  82static inline struct fwnode_handle *iort_get_fwnode(
  83			struct acpi_iort_node *node)
  84{
  85	struct iort_fwnode *curr;
  86	struct fwnode_handle *fwnode = NULL;
  87
  88	spin_lock(&iort_fwnode_lock);
  89	list_for_each_entry(curr, &iort_fwnode_list, list) {
  90		if (curr->iort_node == node) {
  91			fwnode = curr->fwnode;
  92			break;
  93		}
  94	}
  95	spin_unlock(&iort_fwnode_lock);
  96
  97	return fwnode;
  98}
  99
 100/**
 101 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 102 *
 103 * @node: IORT table node associated with fwnode to delete
 104 */
 105static inline void iort_delete_fwnode(struct acpi_iort_node *node)
 106{
 107	struct iort_fwnode *curr, *tmp;
 108
 109	spin_lock(&iort_fwnode_lock);
 110	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
 111		if (curr->iort_node == node) {
 112			list_del(&curr->list);
 113			kfree(curr);
 114			break;
 115		}
 116	}
 117	spin_unlock(&iort_fwnode_lock);
 118}
 119
 120/**
 121 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 122 *
 123 * @fwnode: fwnode associated with device to be looked-up
 124 *
 125 * Returns: iort_node pointer on success, NULL on failure
 126 */
 127static inline struct acpi_iort_node *iort_get_iort_node(
 128			struct fwnode_handle *fwnode)
 129{
 130	struct iort_fwnode *curr;
 131	struct acpi_iort_node *iort_node = NULL;
 132
 133	spin_lock(&iort_fwnode_lock);
 134	list_for_each_entry(curr, &iort_fwnode_list, list) {
 135		if (curr->fwnode == fwnode) {
 136			iort_node = curr->iort_node;
 137			break;
 138		}
 139	}
 140	spin_unlock(&iort_fwnode_lock);
 141
 142	return iort_node;
 143}
 144
 145typedef acpi_status (*iort_find_node_callback)
 146	(struct acpi_iort_node *node, void *context);
 147
 148/* Root pointer to the mapped IORT table */
 149static struct acpi_table_header *iort_table;
 150
 151static LIST_HEAD(iort_msi_chip_list);
 152static DEFINE_SPINLOCK(iort_msi_chip_lock);
 153
 154/**
 155 * iort_register_domain_token() - register domain token along with related
  156 * ITS ID and base address to the list from which we can get it back later on.
 157 * @trans_id: ITS ID.
 158 * @base: ITS base address.
 159 * @fw_node: Domain token.
 160 *
 161 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 162 */
 163int iort_register_domain_token(int trans_id, phys_addr_t base,
 164			       struct fwnode_handle *fw_node)
 165{
 166	struct iort_its_msi_chip *its_msi_chip;
 167
 168	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
 169	if (!its_msi_chip)
 170		return -ENOMEM;
 171
 172	its_msi_chip->fw_node = fw_node;
 173	its_msi_chip->translation_id = trans_id;
 174	its_msi_chip->base_addr = base;
 175
 176	spin_lock(&iort_msi_chip_lock);
 177	list_add(&its_msi_chip->list, &iort_msi_chip_list);
 178	spin_unlock(&iort_msi_chip_lock);
 179
 180	return 0;
 181}
 182
 183/**
 184 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 185 * @trans_id: ITS ID.
 186 *
 187 * Returns: none.
 188 */
 189void iort_deregister_domain_token(int trans_id)
 190{
 191	struct iort_its_msi_chip *its_msi_chip, *t;
 192
 193	spin_lock(&iort_msi_chip_lock);
 194	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
 195		if (its_msi_chip->translation_id == trans_id) {
 196			list_del(&its_msi_chip->list);
 197			kfree(its_msi_chip);
 198			break;
 199		}
 200	}
 201	spin_unlock(&iort_msi_chip_lock);
 202}
 203
 204/**
 205 * iort_find_domain_token() - Find domain token based on given ITS ID
 206 * @trans_id: ITS ID.
 207 *
  208 * Returns: domain token if found on the list, NULL otherwise
 209 */
 210struct fwnode_handle *iort_find_domain_token(int trans_id)
 211{
 212	struct fwnode_handle *fw_node = NULL;
 213	struct iort_its_msi_chip *its_msi_chip;
 214
 215	spin_lock(&iort_msi_chip_lock);
 216	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
 217		if (its_msi_chip->translation_id == trans_id) {
 218			fw_node = its_msi_chip->fw_node;
 219			break;
 220		}
 221	}
 222	spin_unlock(&iort_msi_chip_lock);
 223
 224	return fw_node;
 225}
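/*
 * Illustrative usage sketch, not taken from this file; the registering
 * caller shown here is an assumption (in practice an ITS driver), but the
 * helpers are the ones defined above and used further below:
 *
 *	err = iort_register_domain_token(its_id, its_phys_base, its_fwnode);
 *	...
 *	fwnode = iort_find_domain_token(its_id);
 *	if (fwnode)
 *		domain = irq_find_matching_fwnode(fwnode, bus_token);
 */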
 226
 227static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
 228					     iort_find_node_callback callback,
 229					     void *context)
 230{
 231	struct acpi_iort_node *iort_node, *iort_end;
 232	struct acpi_table_iort *iort;
 233	int i;
 234
 235	if (!iort_table)
 236		return NULL;
 237
 238	/* Get the first IORT node */
 239	iort = (struct acpi_table_iort *)iort_table;
 240	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
 241				 iort->node_offset);
 242	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
 243				iort_table->length);
 244
 245	for (i = 0; i < iort->node_count; i++) {
 246		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
 247			       "IORT node pointer overflows, bad table!\n"))
 248			return NULL;
 249
 250		if (iort_node->type == type &&
 251		    ACPI_SUCCESS(callback(iort_node, context)))
 252			return iort_node;
 253
 254		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
 255					 iort_node->length);
 256	}
 257
 258	return NULL;
 259}
 260
 261static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
 262					    void *context)
 263{
 264	struct device *dev = context;
 265	acpi_status status = AE_NOT_FOUND;
 266
 267	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
 268		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
 269		struct acpi_device *adev;
 270		struct acpi_iort_named_component *ncomp;
 271		struct device *nc_dev = dev;
 272
 273		/*
 274		 * Walk the device tree to find a device with an
 275		 * ACPI companion; there is no point in scanning
 276		 * IORT for a device matching a named component if
 277		 * the device does not have an ACPI companion to
 278		 * start with.
 279		 */
 280		do {
 281			adev = ACPI_COMPANION(nc_dev);
 282			if (adev)
 283				break;
 284
 285			nc_dev = nc_dev->parent;
 286		} while (nc_dev);
 287
 288		if (!adev)
 289			goto out;
 290
 291		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
 292		if (ACPI_FAILURE(status)) {
 293			dev_warn(nc_dev, "Can't get device full path name\n");
 294			goto out;
 295		}
 296
 297		ncomp = (struct acpi_iort_named_component *)node->node_data;
 298		status = !strcmp(ncomp->device_name, buf.pointer) ?
 299							AE_OK : AE_NOT_FOUND;
 300		acpi_os_free(buf.pointer);
 301	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
 302		struct acpi_iort_root_complex *pci_rc;
 303		struct pci_bus *bus;
 304
 305		bus = to_pci_bus(dev);
 306		pci_rc = (struct acpi_iort_root_complex *)node->node_data;
 307
 308		/*
  309		 * It is assumed that PCI segment numbers map one-to-one
 310		 * with root complexes. Each segment number can represent only
 311		 * one root complex.
 312		 */
 313		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
 314							AE_OK : AE_NOT_FOUND;
 315	}
 316out:
 317	return status;
 318}
 319
 320static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
 321		       u32 *rid_out, bool check_overlap)
 322{
 323	/* Single mapping does not care for input id */
 324	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
 325		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
 326		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
 327			*rid_out = map->output_base;
 328			return 0;
 329		}
 330
 331		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
 332			map, type);
 333		return -ENXIO;
 334	}
 335
 336	if (rid_in < map->input_base ||
 337	    (rid_in > map->input_base + map->id_count))
 338		return -ENXIO;
 339
 340	if (check_overlap) {
 341		/*
 342		 * We already found a mapping for this input ID at the end of
 343		 * another region. If it coincides with the start of this
 344		 * region, we assume the prior match was due to the off-by-1
 345		 * issue mentioned below, and allow it to be superseded.
 346		 * Otherwise, things are *really* broken, and we just disregard
 347		 * duplicate matches entirely to retain compatibility.
 348		 */
 349		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
 350		       map, rid_in);
 351		if (rid_in != map->input_base)
 352			return -ENXIO;
 353
 354		pr_err(FW_BUG "applying workaround.\n");
 355	}
 356
 357	*rid_out = map->output_base + (rid_in - map->input_base);
 358
 359	/*
 360	 * Due to confusion regarding the meaning of the id_count field (which
 361	 * carries the number of IDs *minus 1*), we may have to disregard this
 362	 * match if it is at the end of the range, and overlaps with the start
 363	 * of another one.
 364	 */
 365	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
 366		return -EAGAIN;
 367	return 0;
 368}
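/*
 * Worked example with illustrative values: a mapping with
 * input_base = 0x100, id_count = 0xff and output_base = 0x0 covers input
 * IDs 0x100-0x1ff inclusive (id_count carries the number of IDs minus
 * one), so input ID 0x1ff maps to output ID 0xff.  Because that input ID
 * sits at the very end of the range, the -EAGAIN return above allows a
 * later mapping starting at input_base = 0x1ff to supersede the match.
 */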
 369
 370static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
 371					       u32 *id_out, int index)
 372{
 373	struct acpi_iort_node *parent;
 374	struct acpi_iort_id_mapping *map;
 375
 376	if (!node->mapping_offset || !node->mapping_count ||
 377				     index >= node->mapping_count)
 378		return NULL;
 379
 380	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
 381			   node->mapping_offset + index * sizeof(*map));
 382
 383	/* Firmware bug! */
 384	if (!map->output_reference) {
 385		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
 386		       node, node->type);
 387		return NULL;
 388	}
 389
 390	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
 391			       map->output_reference);
 392
 393	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
 394		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
 395		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
 396		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
 397		    node->type == ACPI_IORT_NODE_PMCG) {
 398			*id_out = map->output_base;
 399			return parent;
 400		}
 401	}
 402
 403	return NULL;
 404}
 405
 406#ifndef ACPI_IORT_SMMU_V3_DEVICEID_VALID
 407#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1 << 4)
 408#endif
 409
 410static int iort_get_id_mapping_index(struct acpi_iort_node *node)
 411{
 412	struct acpi_iort_smmu_v3 *smmu;
 413	struct acpi_iort_pmcg *pmcg;
 414
 415	switch (node->type) {
 416	case ACPI_IORT_NODE_SMMU_V3:
 417		/*
 418		 * SMMUv3 dev ID mapping index was introduced in revision 1
 419		 * table, not available in revision 0
 420		 */
 421		if (node->revision < 1)
 422			return -EINVAL;
 423
 424		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
 425		/*
 426		 * Until IORT E.e (node rev. 5), the ID mapping index was
 427		 * defined to be valid unless all interrupts are GSIV-based.
 428		 */
 429		if (node->revision < 5) {
 430			if (smmu->event_gsiv && smmu->pri_gsiv &&
 431			    smmu->gerr_gsiv && smmu->sync_gsiv)
 432				return -EINVAL;
 433		} else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) {
 434			return -EINVAL;
 435		}
 436
 437		if (smmu->id_mapping_index >= node->mapping_count) {
 438			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
 439			       node, node->type);
 440			return -EINVAL;
 441		}
 442
 443		return smmu->id_mapping_index;
 444	case ACPI_IORT_NODE_PMCG:
 445		pmcg = (struct acpi_iort_pmcg *)node->node_data;
 446		if (pmcg->overflow_gsiv || node->mapping_count == 0)
 447			return -EINVAL;
 448
 449		return 0;
 450	default:
 451		return -EINVAL;
 452	}
 453}
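/*
 * Example, for illustration only: an SMMUv3 node (table revision >= 1)
 * whose own events are MSI-based points id_mapping_index at the single
 * mapping that yields the SMMU's ITS DeviceID.  iort_node_map_id() skips
 * that entry when translating endpoint IDs, while iort_set_device_domain()
 * below uses it to pick the MSI domain for the SMMU platform device.
 */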
 454
 455static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
 456					       u32 id_in, u32 *id_out,
 457					       u8 type_mask)
 458{
 459	u32 id = id_in;
 460
 461	/* Parse the ID mapping tree to find specified node type */
 462	while (node) {
 463		struct acpi_iort_id_mapping *map;
 464		int i, index, rc = 0;
 465		u32 out_ref = 0, map_id = id;
 466
 467		if (IORT_TYPE_MASK(node->type) & type_mask) {
 468			if (id_out)
 469				*id_out = id;
 470			return node;
 471		}
 472
 473		if (!node->mapping_offset || !node->mapping_count)
 474			goto fail_map;
 475
 476		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
 477				   node->mapping_offset);
 478
 479		/* Firmware bug! */
 480		if (!map->output_reference) {
 481			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
 482			       node, node->type);
 483			goto fail_map;
 484		}
 485
 486		/*
 487		 * Get the special ID mapping index (if any) and skip its
 488		 * associated ID map to prevent erroneous multi-stage
 489		 * IORT ID translations.
 490		 */
 491		index = iort_get_id_mapping_index(node);
 492
 493		/* Do the ID translation */
 494		for (i = 0; i < node->mapping_count; i++, map++) {
 495			/* if it is special mapping index, skip it */
 496			if (i == index)
 497				continue;
 498
 499			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
 500			if (!rc)
 501				break;
 502			if (rc == -EAGAIN)
 503				out_ref = map->output_reference;
 504		}
 505
 506		if (i == node->mapping_count && !out_ref)
 507			goto fail_map;
 508
 509		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
 510				    rc ? out_ref : map->output_reference);
 511	}
 512
 513fail_map:
 514	/* Map input ID to output ID unchanged on mapping failure */
 515	if (id_out)
 516		*id_out = id_in;
 517
 518	return NULL;
 519}
 520
 521static struct acpi_iort_node *iort_node_map_platform_id(
 522		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
 523		int index)
 524{
 525	struct acpi_iort_node *parent;
 526	u32 id;
 527
 528	/* step 1: retrieve the initial dev id */
 529	parent = iort_node_get_id(node, &id, index);
 530	if (!parent)
 531		return NULL;
 532
 533	/*
  534	 * optional step 2: if the parent is not the target type we want,
  535	 * map the initial dev id again, for use cases such as
  536	 * NC (named component) -> SMMU -> ITS. If the type matches,
 537	 * return the initial dev id and its parent pointer directly.
 538	 */
 539	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
 540		parent = iort_node_map_id(parent, id, id_out, type_mask);
 541	else
 542		if (id_out)
 543			*id_out = id;
 544
 545	return parent;
 546}
 547
 548static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
 549{
 550	struct pci_bus *pbus;
 551
 552	if (!dev_is_pci(dev)) {
 553		struct acpi_iort_node *node;
 554		/*
  555		 * Scan iort_fwnode_list to see if it's an iort platform
  556		 * device (such as an SMMU or PMCG); its iort node was already
  557		 * cached and associated with its fwnode when the iort platform
  558		 * devices were initialized.
 559		 */
 560		node = iort_get_iort_node(dev->fwnode);
 561		if (node)
 562			return node;
 563		/*
 564		 * if not, then it should be a platform device defined in
 565		 * DSDT/SSDT (with Named Component node in IORT)
 566		 */
 567		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
 568				      iort_match_node_callback, dev);
 569	}
 570
 571	pbus = to_pci_dev(dev)->bus;
 572
 573	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
 574			      iort_match_node_callback, &pbus->dev);
 575}
 576
 577/**
  578 * iort_msi_map_id() - Map an MSI input ID for a device
 579 * @dev: The device for which the mapping is to be done.
 580 * @input_id: The device input ID.
 581 *
 582 * Returns: mapped MSI ID on success, input ID otherwise
 583 */
 584u32 iort_msi_map_id(struct device *dev, u32 input_id)
 585{
 586	struct acpi_iort_node *node;
 587	u32 dev_id;
 588
 589	node = iort_find_dev_node(dev);
 590	if (!node)
 591		return input_id;
 592
 593	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
 594	return dev_id;
 595}
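/*
 * Example with hypothetical IDs: a PCI device at 0000:00:03.0 has
 * requester ID 0x18; if its root complex mapping has input_base 0 and
 * output_base 0x10000, iort_msi_map_id() returns ITS device ID 0x10018.
 * When no node or mapping is found, the input ID is returned unchanged.
 */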
 596
 597/**
 598 * iort_pmsi_get_dev_id() - Get the device id for a device
 599 * @dev: The device for which the mapping is to be done.
 600 * @dev_id: The device ID found.
 601 *
  602 * Returns: 0 if a dev id was found, -ENODEV on error
 603 */
 604int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
 605{
 606	int i, index;
 607	struct acpi_iort_node *node;
 608
 609	node = iort_find_dev_node(dev);
 610	if (!node)
 611		return -ENODEV;
 612
 613	index = iort_get_id_mapping_index(node);
 614	/* if there is a valid index, go get the dev_id directly */
 615	if (index >= 0) {
 616		if (iort_node_get_id(node, dev_id, index))
 617			return 0;
 618	} else {
 619		for (i = 0; i < node->mapping_count; i++) {
 620			if (iort_node_map_platform_id(node, dev_id,
 621						      IORT_MSI_TYPE, i))
 622				return 0;
 623		}
 624	}
 625
 626	return -ENODEV;
 627}
 628
 629static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
 630{
 631	struct iort_its_msi_chip *its_msi_chip;
 632	int ret = -ENODEV;
 633
 634	spin_lock(&iort_msi_chip_lock);
 635	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
 636		if (its_msi_chip->translation_id == its_id) {
 637			*base = its_msi_chip->base_addr;
 638			ret = 0;
 639			break;
 640		}
 641	}
 642	spin_unlock(&iort_msi_chip_lock);
 643
 644	return ret;
 645}
 646
 647/**
 648 * iort_dev_find_its_id() - Find the ITS identifier for a device
 649 * @dev: The device.
 650 * @id: Device's ID
 651 * @idx: Index of the ITS identifier list.
 652 * @its_id: ITS identifier.
 653 *
 654 * Returns: 0 on success, appropriate error value otherwise
 655 */
 656static int iort_dev_find_its_id(struct device *dev, u32 id,
 657				unsigned int idx, int *its_id)
 658{
 659	struct acpi_iort_its_group *its;
 660	struct acpi_iort_node *node;
 661
 662	node = iort_find_dev_node(dev);
 663	if (!node)
 664		return -ENXIO;
 665
 666	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
 667	if (!node)
 668		return -ENXIO;
 669
 670	/* Move to ITS specific data */
 671	its = (struct acpi_iort_its_group *)node->node_data;
 672	if (idx >= its->its_count) {
 673		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
 674			idx, its->its_count);
 675		return -ENXIO;
 676	}
 677
 678	*its_id = its->identifiers[idx];
 679	return 0;
 680}
 681
 682/**
 683 * iort_get_device_domain() - Find MSI domain related to a device
 684 * @dev: The device.
 685 * @id: Requester ID for the device.
 686 * @bus_token: irq domain bus token.
 687 *
 688 * Returns: the MSI domain for this device, NULL otherwise
 689 */
 690struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
 691					  enum irq_domain_bus_token bus_token)
 692{
 693	struct fwnode_handle *handle;
 694	int its_id;
 695
 696	if (iort_dev_find_its_id(dev, id, 0, &its_id))
 697		return NULL;
 698
 699	handle = iort_find_domain_token(its_id);
 700	if (!handle)
 701		return NULL;
 702
 703	return irq_find_matching_fwnode(handle, bus_token);
 704}
 705
 706static void iort_set_device_domain(struct device *dev,
 707				   struct acpi_iort_node *node)
 708{
 709	struct acpi_iort_its_group *its;
 710	struct acpi_iort_node *msi_parent;
 711	struct acpi_iort_id_mapping *map;
 712	struct fwnode_handle *iort_fwnode;
 713	struct irq_domain *domain;
 714	int index;
 715
 716	index = iort_get_id_mapping_index(node);
 717	if (index < 0)
 718		return;
 719
 720	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
 721			   node->mapping_offset + index * sizeof(*map));
 722
 723	/* Firmware bug! */
 724	if (!map->output_reference ||
 725	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
 726		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
 727		       node, node->type);
 728		return;
 729	}
 730
 731	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
 732				  map->output_reference);
 733
 734	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
 735		return;
 736
 737	/* Move to ITS specific data */
 738	its = (struct acpi_iort_its_group *)msi_parent->node_data;
 739
 740	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
 741	if (!iort_fwnode)
 742		return;
 743
 744	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
 745	if (domain)
 746		dev_set_msi_domain(dev, domain);
 747}
 748
 749/**
 750 * iort_get_platform_device_domain() - Find MSI domain related to a
 751 * platform device
 752 * @dev: the dev pointer associated with the platform device
 753 *
 754 * Returns: the MSI domain for this device, NULL otherwise
 755 */
 756static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
 757{
 758	struct acpi_iort_node *node, *msi_parent = NULL;
 759	struct fwnode_handle *iort_fwnode;
 760	struct acpi_iort_its_group *its;
 761	int i;
 762
 763	/* find its associated iort node */
 764	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
 765			      iort_match_node_callback, dev);
 766	if (!node)
 767		return NULL;
 768
 769	/* then find its msi parent node */
 770	for (i = 0; i < node->mapping_count; i++) {
 771		msi_parent = iort_node_map_platform_id(node, NULL,
 772						       IORT_MSI_TYPE, i);
 773		if (msi_parent)
 774			break;
 775	}
 776
 777	if (!msi_parent)
 778		return NULL;
 779
 780	/* Move to ITS specific data */
 781	its = (struct acpi_iort_its_group *)msi_parent->node_data;
 782
 783	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
 784	if (!iort_fwnode)
 785		return NULL;
 786
 787	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
 788}
 789
 790void acpi_configure_pmsi_domain(struct device *dev)
 791{
 792	struct irq_domain *msi_domain;
 793
 794	msi_domain = iort_get_platform_device_domain(dev);
 795	if (msi_domain)
 796		dev_set_msi_domain(dev, msi_domain);
 797}
 798
 799#ifdef CONFIG_IOMMU_API
 800static void iort_rmr_free(struct device *dev,
 801			  struct iommu_resv_region *region)
 802{
 803	struct iommu_iort_rmr_data *rmr_data;
 804
 805	rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
 806	kfree(rmr_data->sids);
 807	kfree(rmr_data);
 808}
 809
 810static struct iommu_iort_rmr_data *iort_rmr_alloc(
 811					struct acpi_iort_rmr_desc *rmr_desc,
 812					int prot, enum iommu_resv_type type,
 813					u32 *sids, u32 num_sids)
 814{
 815	struct iommu_iort_rmr_data *rmr_data;
 816	struct iommu_resv_region *region;
 817	u32 *sids_copy;
 818	u64 addr = rmr_desc->base_address, size = rmr_desc->length;
 819
 820	rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
 821	if (!rmr_data)
 822		return NULL;
 823
 824	/* Create a copy of SIDs array to associate with this rmr_data */
 825	sids_copy = kmemdup_array(sids, num_sids, sizeof(*sids), GFP_KERNEL);
 826	if (!sids_copy) {
 827		kfree(rmr_data);
 828		return NULL;
 829	}
 830	rmr_data->sids = sids_copy;
 831	rmr_data->num_sids = num_sids;
 832
 833	if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
 834		/* PAGE align base addr and size */
 835		addr &= PAGE_MASK;
 836		size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
 837
 838		pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
 839		       rmr_desc->base_address,
 840		       rmr_desc->base_address + rmr_desc->length - 1,
 841		       addr, addr + size - 1);
 842	}
 843
 844	region = &rmr_data->rr;
 845	INIT_LIST_HEAD(&region->list);
 846	region->start = addr;
 847	region->length = size;
 848	region->prot = prot;
 849	region->type = type;
 850	region->free = iort_rmr_free;
 851
 852	return rmr_data;
 853}
 854
 855static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
 856					u32 count)
 857{
 858	int i, j;
 859
 860	for (i = 0; i < count; i++) {
 861		u64 end, start = desc[i].base_address, length = desc[i].length;
 862
 863		if (!length) {
 864			pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
 865			       start);
 866			continue;
 867		}
 868
 869		end = start + length - 1;
 870
 871		/* Check for address overlap */
 872		for (j = i + 1; j < count; j++) {
 873			u64 e_start = desc[j].base_address;
 874			u64 e_end = e_start + desc[j].length - 1;
 875
 876			if (start <= e_end && end >= e_start)
 877				pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
 878				       start, end);
 879		}
 880	}
 881}
 882
 883/*
 884 * Please note, we will keep the already allocated RMR reserve
 885 * regions in case of a memory allocation failure.
 886 */
 887static void iort_get_rmrs(struct acpi_iort_node *node,
 888			  struct acpi_iort_node *smmu,
 889			  u32 *sids, u32 num_sids,
 890			  struct list_head *head)
 891{
 892	struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
 893	struct acpi_iort_rmr_desc *rmr_desc;
 894	int i;
 895
 896	rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
 897				rmr->rmr_offset);
 898
 899	iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
 900
 901	for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
 902		struct iommu_iort_rmr_data *rmr_data;
 903		enum iommu_resv_type type;
 904		int prot = IOMMU_READ | IOMMU_WRITE;
 905
 906		if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
 907			type = IOMMU_RESV_DIRECT_RELAXABLE;
 908		else
 909			type = IOMMU_RESV_DIRECT;
 910
 911		if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
 912			prot |= IOMMU_PRIV;
 913
  914		/* Attributes 0x00 - 0x03 represent device memory */
 915		if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
 916				ACPI_IORT_RMR_ATTR_DEVICE_GRE)
 917			prot |= IOMMU_MMIO;
 918		else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
 919				ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
 920			prot |= IOMMU_CACHE;
 921
 922		rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
 923					  sids, num_sids);
 924		if (!rmr_data)
 925			return;
 926
 927		list_add_tail(&rmr_data->rr.list, head);
 928	}
 929}
 930
 931static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
 932				u32 new_count)
 933{
 934	u32 *new_sids;
 935	u32 total_count = count + new_count;
 936	int i;
 937
 938	new_sids = krealloc_array(sids, count + new_count,
 939				  sizeof(*new_sids), GFP_KERNEL);
 940	if (!new_sids)
 941		return NULL;
 942
 943	for (i = count; i < total_count; i++)
 944		new_sids[i] = id_start++;
 945
 946	return new_sids;
 947}
 948
 949static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
 950			     u32 id_count)
 951{
 952	int i;
 953	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 954
 955	/*
 956	 * Make sure the kernel has preserved the boot firmware PCIe
 957	 * configuration. This is required to ensure that the RMR PCIe
 958	 * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
 959	 */
 960	if (dev_is_pci(dev)) {
 961		struct pci_dev *pdev = to_pci_dev(dev);
 962		struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
 963
 964		if (!host->preserve_config)
 965			return false;
 966	}
 967
 968	for (i = 0; i < fwspec->num_ids; i++) {
 969		if (fwspec->ids[i] >= id_start &&
 970		    fwspec->ids[i] <= id_start + id_count)
 971			return true;
 972	}
 973
 974	return false;
 975}
 976
 977static void iort_node_get_rmr_info(struct acpi_iort_node *node,
 978				   struct acpi_iort_node *iommu,
 979				   struct device *dev, struct list_head *head)
 980{
 981	struct acpi_iort_node *smmu = NULL;
 982	struct acpi_iort_rmr *rmr;
 983	struct acpi_iort_id_mapping *map;
 984	u32 *sids = NULL;
 985	u32 num_sids = 0;
 986	int i;
 987
 988	if (!node->mapping_offset || !node->mapping_count) {
 989		pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
 990		       node);
 991		return;
 992	}
 993
 994	rmr = (struct acpi_iort_rmr *)node->node_data;
 995	if (!rmr->rmr_offset || !rmr->rmr_count)
 996		return;
 997
 998	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
 999			   node->mapping_offset);
1000
1001	/*
1002	 * Go through the ID mappings and see if we have a match for SMMU
1003	 * and dev (if !NULL). If found, get the sids for the Node.
1004	 * Please note, id_count is equal to the number of IDs in the
1005	 * range minus one.
1006	 */
1007	for (i = 0; i < node->mapping_count; i++, map++) {
1008		struct acpi_iort_node *parent;
1009
1010		parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
1011				      map->output_reference);
1012		if (parent != iommu)
1013			continue;
1014
1015		/* If dev is valid, check RMR node corresponds to the dev SID */
1016		if (dev && !iort_rmr_has_dev(dev, map->output_base,
1017					     map->id_count))
1018			continue;
1019
1020		/* Retrieve SIDs associated with the Node. */
1021		sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
1022					   map->id_count + 1);
1023		if (!sids)
1024			return;
1025
1026		num_sids += map->id_count + 1;
1027	}
1028
1029	if (!sids)
1030		return;
1031
1032	iort_get_rmrs(node, smmu, sids, num_sids, head);
1033	kfree(sids);
1034}
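/*
 * Example with illustrative values: an RMR node mapping with
 * output_base = 0x800 and id_count = 3 contributes the four stream IDs
 * 0x800-0x803; id_count is the number of IDs minus one, hence the
 * "map->id_count + 1" above.
 */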
1035
1036static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
1037			   struct list_head *head)
1038{
1039	struct acpi_table_iort *iort;
1040	struct acpi_iort_node *iort_node, *iort_end;
1041	int i;
1042
1043	/* Only supports ARM DEN 0049E.d onwards */
1044	if (iort_table->revision < 5)
1045		return;
1046
1047	iort = (struct acpi_table_iort *)iort_table;
1048
1049	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1050				 iort->node_offset);
1051	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1052				iort_table->length);
1053
1054	for (i = 0; i < iort->node_count; i++) {
1055		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
1056			       "IORT node pointer overflows, bad table!\n"))
1057			return;
1058
1059		if (iort_node->type == ACPI_IORT_NODE_RMR)
1060			iort_node_get_rmr_info(iort_node, iommu, dev, head);
1061
1062		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
1063					 iort_node->length);
1064	}
1065}
1066
1067/*
1068 * Populate the RMR list associated with a given IOMMU and dev(if provided).
1069 * If dev is NULL, the function populates all the RMRs associated with the
1070 * given IOMMU.
1071 */
1072static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
1073					    struct device *dev,
1074					    struct list_head *head)
1075{
1076	struct acpi_iort_node *iommu;
1077
1078	iommu = iort_get_iort_node(iommu_fwnode);
1079	if (!iommu)
1080		return;
1081
1082	iort_find_rmrs(iommu, dev, head);
1083}
1084
1085static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
1086{
1087	struct acpi_iort_node *iommu;
1088	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1089
1090	iommu = iort_get_iort_node(fwspec->iommu_fwnode);
1091
1092	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
1093		struct acpi_iort_smmu_v3 *smmu;
1094
1095		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
1096		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
1097			return iommu;
1098	}
1099
1100	return NULL;
1101}
1102
1103/*
1104 * Retrieve platform specific HW MSI reserve regions.
1105 * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
1106 * associated with the device are the HW MSI reserved regions.
1107 */
1108static void iort_iommu_msi_get_resv_regions(struct device *dev,
1109					    struct list_head *head)
1110{
1111	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1112	struct acpi_iort_its_group *its;
1113	struct acpi_iort_node *iommu_node, *its_node = NULL;
1114	int i;
1115
1116	iommu_node = iort_get_msi_resv_iommu(dev);
1117	if (!iommu_node)
1118		return;
1119
1120	/*
1121	 * Current logic to reserve ITS regions relies on HW topologies
1122	 * where a given PCI or named component maps its IDs to only one
1123	 * ITS group; if a PCI or named component can map its IDs to
1124	 * different ITS groups through IORT mappings this function has
1125	 * to be reworked to ensure we reserve regions for all ITS groups
1126	 * a given PCI or named component may map IDs to.
1127	 */
1128
1129	for (i = 0; i < fwspec->num_ids; i++) {
1130		its_node = iort_node_map_id(iommu_node,
1131					fwspec->ids[i],
1132					NULL, IORT_MSI_TYPE);
1133		if (its_node)
1134			break;
1135	}
1136
1137	if (!its_node)
1138		return;
1139
1140	/* Move to ITS specific data */
1141	its = (struct acpi_iort_its_group *)its_node->node_data;
1142
1143	for (i = 0; i < its->its_count; i++) {
1144		phys_addr_t base;
1145
1146		if (!iort_find_its_base(its->identifiers[i], &base)) {
1147			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1148			struct iommu_resv_region *region;
1149
1150			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
1151							 prot, IOMMU_RESV_MSI,
1152							 GFP_KERNEL);
1153			if (region)
1154				list_add_tail(&region->list, head);
1155		}
1156	}
1157}
1158
1159/**
1160 * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
1161 * @dev: Device from iommu_get_resv_regions()
1162 * @head: Reserved region list from iommu_get_resv_regions()
1163 */
1164void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
1165{
1166	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1167
1168	iort_iommu_msi_get_resv_regions(dev, head);
1169	iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
1170}
1171
1172/**
1173 * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
1174 *                     associated StreamIDs information.
1175 * @iommu_fwnode: fwnode associated with IOMMU
1176 * @head: Reserved region list
1177 */
1178void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
1179		       struct list_head *head)
1180{
1181	iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
1182}
1183EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
1184
1185/**
1186 * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
1187 * @iommu_fwnode: fwnode associated with IOMMU
1188 * @head: Reserved region list
1189 */
1190void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
1191		       struct list_head *head)
1192{
1193	struct iommu_resv_region *entry, *next;
1194
1195	list_for_each_entry_safe(entry, next, head, list)
1196		entry->free(NULL, entry);
1197}
1198EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
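/*
 * Usage sketch, for illustration only (the caller and variable names are
 * assumptions; in practice an SMMUv3 driver consumes these exports):
 *
 *	LIST_HEAD(rmr_list);
 *
 *	iort_get_rmr_sids(dev_fwnode(smmu_dev), &rmr_list);
 *	... program bypass/identity mappings for the returned regions ...
 *	iort_put_rmr_sids(dev_fwnode(smmu_dev), &rmr_list);
 */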
1199
1200static inline bool iort_iommu_driver_enabled(u8 type)
1201{
1202	switch (type) {
1203	case ACPI_IORT_NODE_SMMU_V3:
1204		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
1205	case ACPI_IORT_NODE_SMMU:
1206		return IS_ENABLED(CONFIG_ARM_SMMU);
1207	default:
1208		pr_warn("IORT node type %u does not describe an SMMU\n", type);
1209		return false;
1210	}
1211}
1212
1213static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
1214{
1215	struct acpi_iort_root_complex *pci_rc;
1216
1217	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
1218	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
1219}
1220
1221static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node)
1222{
1223	struct acpi_iort_memory_access *memory_access;
1224	struct acpi_iort_root_complex *pci_rc;
1225
1226	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
1227	memory_access =
1228		(struct acpi_iort_memory_access *)&pci_rc->memory_properties;
1229	return memory_access->memory_flags & ACPI_IORT_MF_CANWBS;
1230}
1231
1232static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
1233			    u32 streamid)
1234{
1235	struct fwnode_handle *iort_fwnode;
1236
1237	/* If there's no SMMU driver at all, give up now */
1238	if (!node || !iort_iommu_driver_enabled(node->type))
1239		return -ENODEV;
1240
1241	iort_fwnode = iort_get_fwnode(node);
1242	if (!iort_fwnode)
1243		return -ENODEV;
1244
1245	/*
1246	 * If the SMMU drivers are enabled but not loaded/probed
1247	 * yet, this will defer.
1248	 */
1249	return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode);
1250}
1251
1252struct iort_pci_alias_info {
1253	struct device *dev;
1254	struct acpi_iort_node *node;
1255};
1256
1257static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
1258{
1259	struct iort_pci_alias_info *info = data;
1260	struct acpi_iort_node *parent;
1261	u32 streamid;
1262
1263	parent = iort_node_map_id(info->node, alias, &streamid,
1264				  IORT_IOMMU_TYPE);
1265	return iort_iommu_xlate(info->dev, parent, streamid);
1266}
1267
1268static void iort_named_component_init(struct device *dev,
1269				      struct acpi_iort_node *node)
1270{
1271	struct property_entry props[3] = {};
1272	struct acpi_iort_named_component *nc;
1273
1274	nc = (struct acpi_iort_named_component *)node->node_data;
1275	props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
1276				      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
1277						nc->node_flags));
1278	if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
1279		props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");
1280
1281	if (device_create_managed_software_node(dev, props, NULL))
1282		dev_warn(dev, "Could not add device properties\n");
1283}
1284
1285static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
1286{
1287	struct acpi_iort_node *parent;
1288	int err = -ENODEV, i = 0;
1289	u32 streamid = 0;
1290
1291	do {
1292
1293		parent = iort_node_map_platform_id(node, &streamid,
1294						   IORT_IOMMU_TYPE,
1295						   i++);
1296
1297		if (parent)
1298			err = iort_iommu_xlate(dev, parent, streamid);
1299	} while (parent && !err);
1300
1301	return err;
1302}
1303
1304static int iort_nc_iommu_map_id(struct device *dev,
1305				struct acpi_iort_node *node,
1306				const u32 *in_id)
1307{
1308	struct acpi_iort_node *parent;
1309	u32 streamid;
1310
1311	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
1312	if (parent)
1313		return iort_iommu_xlate(dev, parent, streamid);
1314
1315	return -ENODEV;
1316}
1317
1318
1319/**
1320 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
1321 *
1322 * @dev: device to configure
1323 * @id_in: optional input id const value pointer
1324 *
1325 * Returns: 0 on success, <0 on failure
1326 */
1327int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
1328{
1329	struct acpi_iort_node *node;
1330	int err = -ENODEV;
1331
1332	if (dev_is_pci(dev)) {
1333		struct iommu_fwspec *fwspec;
1334		struct pci_bus *bus = to_pci_dev(dev)->bus;
1335		struct iort_pci_alias_info info = { .dev = dev };
1336
1337		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1338				      iort_match_node_callback, &bus->dev);
1339		if (!node)
1340			return -ENODEV;
1341
1342		info.node = node;
1343		err = pci_for_each_dma_alias(to_pci_dev(dev),
1344					     iort_pci_iommu_init, &info);
1345
1346		fwspec = dev_iommu_fwspec_get(dev);
1347		if (fwspec && iort_pci_rc_supports_ats(node))
1348			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
1349		if (fwspec && iort_pci_rc_supports_canwbs(node))
1350			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_CANWBS;
1351	} else {
1352		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1353				      iort_match_node_callback, dev);
1354		if (!node)
1355			return -ENODEV;
1356
1357		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
1358			      iort_nc_iommu_map(dev, node);
1359
1360		if (!err)
1361			iort_named_component_init(dev, node);
1362	}
1363
1364	return err;
1365}
1366
1367#else
1368void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
1369{ }
1370int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
1371{ return -ENODEV; }
1372#endif
1373
1374static int nc_dma_get_range(struct device *dev, u64 *limit)
1375{
1376	struct acpi_iort_node *node;
1377	struct acpi_iort_named_component *ncomp;
1378
1379	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1380			      iort_match_node_callback, dev);
1381	if (!node)
1382		return -ENODEV;
1383
1384	ncomp = (struct acpi_iort_named_component *)node->node_data;
1385
1386	if (!ncomp->memory_address_limit) {
1387		pr_warn(FW_BUG "Named component missing memory address limit\n");
1388		return -EINVAL;
1389	}
1390
1391	*limit = ncomp->memory_address_limit >= 64 ? U64_MAX :
1392			(1ULL << ncomp->memory_address_limit) - 1;
1393
1394	return 0;
1395}
1396
1397static int rc_dma_get_range(struct device *dev, u64 *limit)
1398{
1399	struct acpi_iort_node *node;
1400	struct acpi_iort_root_complex *rc;
1401	struct pci_bus *pbus = to_pci_dev(dev)->bus;
1402
1403	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1404			      iort_match_node_callback, &pbus->dev);
1405	if (!node || node->revision < 1)
1406		return -ENODEV;
1407
1408	rc = (struct acpi_iort_root_complex *)node->node_data;
1409
1410	if (!rc->memory_address_limit) {
1411		pr_warn(FW_BUG "Root complex missing memory address limit\n");
1412		return -EINVAL;
1413	}
1414
1415	*limit = rc->memory_address_limit >= 64 ? U64_MAX :
1416			(1ULL << rc->memory_address_limit) - 1;
1417
1418	return 0;
1419}
1420
1421/**
1422 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
1423 * @dev: device to lookup
1424 * @limit: DMA limit result pointer
1425 *
1426 * Return: 0 on success, an error otherwise.
1427 */
1428int iort_dma_get_ranges(struct device *dev, u64 *limit)
1429{
1430	if (dev_is_pci(dev))
1431		return rc_dma_get_range(dev, limit);
1432	else
1433		return nc_dma_get_range(dev, limit);
1434}
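/*
 * Worked example: a memory_address_limit of 32 in the named component or
 * root complex node yields a DMA limit of 0xffffffff (i.e. 32-bit
 * addressing); a value of 64 or more yields U64_MAX, i.e. no constraint.
 */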
1435
1436static void __init acpi_iort_register_irq(int hwirq, const char *name,
1437					  int trigger,
1438					  struct resource *res)
1439{
1440	int irq = acpi_register_gsi(NULL, hwirq, trigger,
1441				    ACPI_ACTIVE_HIGH);
1442
1443	if (irq <= 0) {
1444		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
1445								      name);
1446		return;
1447	}
1448
1449	res->start = irq;
1450	res->end = irq;
1451	res->flags = IORESOURCE_IRQ;
1452	res->name = name;
1453}
1454
1455static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
1456{
1457	struct acpi_iort_smmu_v3 *smmu;
1458	/* Always present mem resource */
1459	int num_res = 1;
1460
1461	/* Retrieve SMMUv3 specific data */
1462	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1463
1464	if (smmu->event_gsiv)
1465		num_res++;
1466
1467	if (smmu->pri_gsiv)
1468		num_res++;
1469
1470	if (smmu->gerr_gsiv)
1471		num_res++;
1472
1473	if (smmu->sync_gsiv)
1474		num_res++;
1475
1476	return num_res;
1477}
1478
1479static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
1480{
1481	/*
1482	 * The Cavium ThunderX2 implementation doesn't support unique
1483	 * irq lines. Use a single irq line for all the SMMUv3 interrupts.
1484	 */
1485	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1486		return false;
1487
1488	/*
1489	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
1490	 * SPI numbers here.
1491	 */
1492	return smmu->event_gsiv == smmu->pri_gsiv &&
1493	       smmu->event_gsiv == smmu->gerr_gsiv &&
1494	       smmu->event_gsiv == smmu->sync_gsiv;
1495}
1496
1497static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
1498{
1499	/*
1500	 * Override the size, for Cavium ThunderX2 implementation
1501	 * which doesn't support the page 1 SMMU register space.
1502	 */
1503	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1504		return SZ_64K;
1505
1506	return SZ_128K;
1507}
1508
1509static void __init arm_smmu_v3_init_resources(struct resource *res,
1510					      struct acpi_iort_node *node)
1511{
1512	struct acpi_iort_smmu_v3 *smmu;
1513	int num_res = 0;
1514
1515	/* Retrieve SMMUv3 specific data */
1516	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1517
1518	res[num_res].start = smmu->base_address;
1519	res[num_res].end = smmu->base_address +
1520				arm_smmu_v3_resource_size(smmu) - 1;
1521	res[num_res].flags = IORESOURCE_MEM;
1522
1523	num_res++;
1524	if (arm_smmu_v3_is_combined_irq(smmu)) {
1525		if (smmu->event_gsiv)
1526			acpi_iort_register_irq(smmu->event_gsiv, "combined",
1527					       ACPI_EDGE_SENSITIVE,
1528					       &res[num_res++]);
1529	} else {
1530
1531		if (smmu->event_gsiv)
1532			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
1533					       ACPI_EDGE_SENSITIVE,
1534					       &res[num_res++]);
1535
1536		if (smmu->pri_gsiv)
1537			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
1538					       ACPI_EDGE_SENSITIVE,
1539					       &res[num_res++]);
1540
1541		if (smmu->gerr_gsiv)
1542			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
1543					       ACPI_EDGE_SENSITIVE,
1544					       &res[num_res++]);
1545
1546		if (smmu->sync_gsiv)
1547			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
1548					       ACPI_EDGE_SENSITIVE,
1549					       &res[num_res++]);
1550	}
1551}
1552
1553static void __init arm_smmu_v3_dma_configure(struct device *dev,
1554					     struct acpi_iort_node *node)
1555{
1556	struct acpi_iort_smmu_v3 *smmu;
1557	enum dev_dma_attr attr;
1558
1559	/* Retrieve SMMUv3 specific data */
1560	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1561
1562	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
1563			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1564
1565	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
1566	dev->dma_mask = &dev->coherent_dma_mask;
1567
1568	/* Configure DMA for the page table walker */
1569	acpi_dma_configure(dev, attr);
1570}
1571
1572#if defined(CONFIG_ACPI_NUMA)
1573/*
1574 * set numa proximity domain for smmuv3 device
1575 */
1576static int __init arm_smmu_v3_set_proximity(struct device *dev,
1577					      struct acpi_iort_node *node)
1578{
1579	struct acpi_iort_smmu_v3 *smmu;
1580
1581	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1582	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1583		int dev_node = pxm_to_node(smmu->pxm);
1584
1585		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
1586			return -EINVAL;
1587
1588		set_dev_node(dev, dev_node);
1589		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1590			smmu->base_address,
1591			smmu->pxm);
1592	}
1593	return 0;
1594}
1595#else
1596#define arm_smmu_v3_set_proximity NULL
1597#endif
1598
1599static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
1600{
1601	struct acpi_iort_smmu *smmu;
1602
1603	/* Retrieve SMMU specific data */
1604	smmu = (struct acpi_iort_smmu *)node->node_data;
1605
1606	/*
1607	 * Only consider the global fault interrupt and ignore the
1608	 * configuration access interrupt.
1609	 *
1610	 * MMIO address and global fault interrupt resources are always
1611	 * present so add them to the context interrupt count as a static
1612	 * value.
1613	 */
1614	return smmu->context_interrupt_count + 2;
1615}
1616
1617static void __init arm_smmu_init_resources(struct resource *res,
1618					   struct acpi_iort_node *node)
1619{
1620	struct acpi_iort_smmu *smmu;
1621	int i, hw_irq, trigger, num_res = 0;
1622	u64 *ctx_irq, *glb_irq;
1623
1624	/* Retrieve SMMU specific data */
1625	smmu = (struct acpi_iort_smmu *)node->node_data;
1626
1627	res[num_res].start = smmu->base_address;
1628	res[num_res].end = smmu->base_address + smmu->span - 1;
1629	res[num_res].flags = IORESOURCE_MEM;
1630	num_res++;
1631
1632	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
1633	/* Global IRQs */
1634	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
1635	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
1636
1637	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
1638				     &res[num_res++]);
1639
1640	/* Context IRQs */
1641	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
1642	for (i = 0; i < smmu->context_interrupt_count; i++) {
1643		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
1644		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
1645
1646		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
1647				       &res[num_res++]);
1648	}
1649}
1650
1651static void __init arm_smmu_dma_configure(struct device *dev,
1652					  struct acpi_iort_node *node)
1653{
1654	struct acpi_iort_smmu *smmu;
1655	enum dev_dma_attr attr;
1656
1657	/* Retrieve SMMU specific data */
1658	smmu = (struct acpi_iort_smmu *)node->node_data;
1659
1660	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
1661			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1662
1663	/* We expect the dma masks to be equivalent for SMMU set-ups */
1664	dev->dma_mask = &dev->coherent_dma_mask;
1665
1666	/* Configure DMA for the page table walker */
1667	acpi_dma_configure(dev, attr);
1668}
1669
1670static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
1671{
1672	struct acpi_iort_pmcg *pmcg;
1673
1674	/* Retrieve PMCG specific data */
1675	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1676
1677	/*
1678	 * There are always 2 memory resources.
1679	 * If the overflow_gsiv is present then add that for a total of 3.
1680	 */
1681	return pmcg->overflow_gsiv ? 3 : 2;
1682}
1683
1684static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
1685						   struct acpi_iort_node *node)
1686{
1687	struct acpi_iort_pmcg *pmcg;
1688
1689	/* Retrieve PMCG specific data */
1690	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1691
1692	res[0].start = pmcg->page0_base_address;
1693	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
1694	res[0].flags = IORESOURCE_MEM;
1695	/*
1696	 * The initial version in DEN0049C lacked a way to describe register
1697	 * page 1, which makes it broken for most PMCG implementations; in
1698	 * that case, just let the driver fail gracefully if it expects to
1699	 * find a second memory resource.
1700	 */
1701	if (node->revision > 0) {
1702		res[1].start = pmcg->page1_base_address;
1703		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
1704		res[1].flags = IORESOURCE_MEM;
1705	}
1706
1707	if (pmcg->overflow_gsiv)
1708		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
1709				       ACPI_EDGE_SENSITIVE, &res[2]);
1710}
1711
1712static struct acpi_platform_list pmcg_plat_info[] __initdata = {
1713	/* HiSilicon Hip08 Platform */
1714	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1715	 "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
1716	/* HiSilicon Hip09 Platform */
1717	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1718	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1719	{"HISI  ", "HIP09A  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1720	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1721	/* HiSilicon Hip10/11 Platforms use the same SMMU IP as Hip09 */
1722	{"HISI  ", "HIP10   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1723	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1724	{"HISI  ", "HIP10C  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1725	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1726	{"HISI  ", "HIP11   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1727	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
1728	{ }
1729};
1730
1731static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
1732{
1733	u32 model;
1734	int idx;
1735
1736	idx = acpi_match_platform_list(pmcg_plat_info);
1737	if (idx >= 0)
1738		model = pmcg_plat_info[idx].data;
1739	else
1740		model = IORT_SMMU_V3_PMCG_GENERIC;
1741
1742	return platform_device_add_data(pdev, &model, sizeof(model));
1743}
1744
1745struct iort_dev_config {
1746	const char *name;
1747	int (*dev_init)(struct acpi_iort_node *node);
1748	void (*dev_dma_configure)(struct device *dev,
1749				  struct acpi_iort_node *node);
1750	int (*dev_count_resources)(struct acpi_iort_node *node);
1751	void (*dev_init_resources)(struct resource *res,
1752				     struct acpi_iort_node *node);
1753	int (*dev_set_proximity)(struct device *dev,
1754				    struct acpi_iort_node *node);
1755	int (*dev_add_platdata)(struct platform_device *pdev);
1756};
1757
1758static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
1759	.name = "arm-smmu-v3",
1760	.dev_dma_configure = arm_smmu_v3_dma_configure,
1761	.dev_count_resources = arm_smmu_v3_count_resources,
1762	.dev_init_resources = arm_smmu_v3_init_resources,
1763	.dev_set_proximity = arm_smmu_v3_set_proximity,
1764};
1765
1766static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
1767	.name = "arm-smmu",
1768	.dev_dma_configure = arm_smmu_dma_configure,
1769	.dev_count_resources = arm_smmu_count_resources,
1770	.dev_init_resources = arm_smmu_init_resources,
1771};
1772
1773static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
1774	.name = "arm-smmu-v3-pmcg",
1775	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
1776	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
1777	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
1778};
1779
1780static __init const struct iort_dev_config *iort_get_dev_cfg(
1781			struct acpi_iort_node *node)
1782{
1783	switch (node->type) {
1784	case ACPI_IORT_NODE_SMMU_V3:
1785		return &iort_arm_smmu_v3_cfg;
1786	case ACPI_IORT_NODE_SMMU:
1787		return &iort_arm_smmu_cfg;
1788	case ACPI_IORT_NODE_PMCG:
1789		return &iort_arm_smmu_v3_pmcg_cfg;
1790	default:
1791		return NULL;
1792	}
1793}
1794
1795/**
1796 * iort_add_platform_device() - Allocate a platform device for IORT node
1797 * @node: Pointer to device ACPI IORT node
1798 * @ops: Pointer to IORT device config struct
1799 *
1800 * Returns: 0 on success, <0 failure
1801 */
1802static int __init iort_add_platform_device(struct acpi_iort_node *node,
1803					   const struct iort_dev_config *ops)
1804{
1805	struct fwnode_handle *fwnode;
1806	struct platform_device *pdev;
1807	struct resource *r;
1808	int ret, count;
1809
1810	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
1811	if (!pdev)
1812		return -ENOMEM;
1813
1814	if (ops->dev_set_proximity) {
1815		ret = ops->dev_set_proximity(&pdev->dev, node);
1816		if (ret)
1817			goto dev_put;
1818	}
1819
1820	count = ops->dev_count_resources(node);
1821
1822	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
1823	if (!r) {
1824		ret = -ENOMEM;
1825		goto dev_put;
1826	}
1827
1828	ops->dev_init_resources(r, node);
1829
1830	ret = platform_device_add_resources(pdev, r, count);
1831	/*
1832	 * Resources are duplicated in platform_device_add_resources,
1833	 * free their allocated memory
1834	 */
1835	kfree(r);
1836
1837	if (ret)
1838		goto dev_put;
1839
1840	/*
1841	 * Platform devices based on PMCG nodes use platform_data to
1842	 * pass the hardware model info to the driver. For others, add
1843	 * a copy of the IORT node pointer to platform_data, to be used to
1844	 * retrieve IORT data information.
1845	 */
1846	if (ops->dev_add_platdata)
1847		ret = ops->dev_add_platdata(pdev);
1848	else
1849		ret = platform_device_add_data(pdev, &node, sizeof(node));
1850
1851	if (ret)
1852		goto dev_put;
1853
1854	fwnode = iort_get_fwnode(node);
1855
1856	if (!fwnode) {
1857		ret = -ENODEV;
1858		goto dev_put;
1859	}
1860
1861	pdev->dev.fwnode = fwnode;
1862
1863	if (ops->dev_dma_configure)
1864		ops->dev_dma_configure(&pdev->dev, node);
1865
1866	iort_set_device_domain(&pdev->dev, node);
1867
1868	ret = platform_device_add(pdev);
1869	if (ret)
1870		goto dma_deconfigure;
1871
1872	return 0;
1873
1874dma_deconfigure:
1875	arch_teardown_dma_ops(&pdev->dev);
1876dev_put:
1877	platform_device_put(pdev);
1878
1879	return ret;
1880}
1881
1882#ifdef CONFIG_PCI
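/*
 * Request PCI ACS (once, system wide) if any root complex ID mapping
 * targets an SMMU, so that PCI isolation matches the IOMMU topology
 * described by firmware.
 */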
1883static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
1884{
1885	static bool acs_enabled __initdata;
1886
1887	if (acs_enabled)
1888		return;
1889
1890	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
1891		struct acpi_iort_node *parent;
1892		struct acpi_iort_id_mapping *map;
1893		int i;
1894
1895		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
1896				   iort_node->mapping_offset);
1897
1898		for (i = 0; i < iort_node->mapping_count; i++, map++) {
1899			if (!map->output_reference)
1900				continue;
1901
1902			parent = ACPI_ADD_PTR(struct acpi_iort_node,
1903					iort_table,  map->output_reference);
1904			/*
1905			 * If we detect a RC->SMMU mapping, make sure
1906			 * we enable ACS on the system.
1907			 */
1908			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
1909				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
1910				pci_request_acs();
1911				acs_enabled = true;
1912				return;
1913			}
1914		}
1915	}
1916}
1917#else
1918static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
1919#endif
1920
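/*
 * Walk all IORT nodes once at boot: request ACS where needed and create
 * platform devices for the SMMU, SMMUv3 and PMCG nodes described by the
 * table.
 */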
1921static void __init iort_init_platform_devices(void)
1922{
1923	struct acpi_iort_node *iort_node, *iort_end;
1924	struct acpi_table_iort *iort;
1925	struct fwnode_handle *fwnode;
1926	int i, ret;
1927	const struct iort_dev_config *ops;
1928
1929	/*
1930	 * iort_table and iort both point to the start of the IORT table,
1931	 * but have different struct types
1932	 */
1933	iort = (struct acpi_table_iort *)iort_table;
1934
1935	/* Get the first IORT node */
1936	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1937				 iort->node_offset);
1938	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1939				iort_table->length);
1940
1941	for (i = 0; i < iort->node_count; i++) {
1942		if (iort_node >= iort_end) {
1943			pr_err("iort node pointer overflows, bad table\n");
1944			return;
1945		}
1946
1947		iort_enable_acs(iort_node);
1948
1949		ops = iort_get_dev_cfg(iort_node);
1950		if (ops) {
1951			fwnode = acpi_alloc_fwnode_static();
1952			if (!fwnode)
1953				return;
1954
1955			iort_set_fwnode(iort_node, fwnode);
1956
1957			ret = iort_add_platform_device(iort_node, ops);
1958			if (ret) {
1959				iort_delete_fwnode(iort_node);
1960				acpi_free_fwnode_static(fwnode);
1961				return;
1962			}
1963		}
1964
1965		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
1966					 iort_node->length);
1967	}
1968}
1969
1970void __init acpi_iort_init(void)
1971{
1972	acpi_status status;
1973
1974	/* iort_table will be used at runtime after the iort init,
1975	 * so we don't need to call acpi_put_table() to release
1976	 * the IORT table mapping.
1977	 */
1978	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
1979	if (ACPI_FAILURE(status)) {
1980		if (status != AE_NOT_FOUND) {
1981			const char *msg = acpi_format_exception(status);
1982
1983			pr_err("Failed to get table, %s\n", msg);
1984		}
1985
1986		return;
1987	}
1988
1989	iort_init_platform_devices();
1990}
1991
1992#ifdef CONFIG_ZONE_DMA
1993/*
1994 * Extract the highest CPU physical address accessible to all DMA masters in
1995 * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
1996 */
1997phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
1998{
1999	phys_addr_t limit = PHYS_ADDR_MAX;
2000	struct acpi_iort_node *node, *end;
2001	struct acpi_table_iort *iort;
2002	acpi_status status;
2003	int i;
2004
2005	if (acpi_disabled)
2006		return limit;
2007
2008	status = acpi_get_table(ACPI_SIG_IORT, 0,
2009				(struct acpi_table_header **)&iort);
2010	if (ACPI_FAILURE(status))
2011		return limit;
2012
2013	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
2014	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);
2015
2016	for (i = 0; i < iort->node_count; i++) {
2017		if (node >= end)
2018			break;
2019
2020		switch (node->type) {
2021			struct acpi_iort_named_component *ncomp;
2022			struct acpi_iort_root_complex *rc;
2023			phys_addr_t local_limit;
2024
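		/*
		 * A zero memory_address_limit gives DMA_BIT_MASK(0) == 0,
		 * which min_not_zero() ignores, so nodes that specify no
		 * limit do not lower the computed address limit.
		 */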
2025		case ACPI_IORT_NODE_NAMED_COMPONENT:
2026			ncomp = (struct acpi_iort_named_component *)node->node_data;
2027			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
2028			limit = min_not_zero(limit, local_limit);
2029			break;
2030
2031		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
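			/*
			 * memory_address_limit is only present in root
			 * complex nodes of revision 1 or later; skip
			 * older nodes.
			 */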
2032			if (node->revision < 1)
2033				break;
2034
2035			rc = (struct acpi_iort_root_complex *)node->node_data;
2036			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
2037			limit = min_not_zero(limit, local_limit);
2038			break;
2039		}
2040		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
2041	}
2042	acpi_put_table(&iort->header);
2043	return limit;
2044}
2045#endif
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2016, Semihalf
   4 *	Author: Tomasz Nowicki <tn@semihalf.com>
   5 *
   6 * This file implements early detection/parsing of I/O mapping
   7 * reported to OS through firmware via I/O Remapping Table (IORT)
   8 * IORT document number: ARM DEN 0049A
   9 */
  10
  11#define pr_fmt(fmt)	"ACPI: IORT: " fmt
  12
  13#include <linux/acpi_iort.h>
  14#include <linux/bitfield.h>
  15#include <linux/iommu.h>
  16#include <linux/kernel.h>
  17#include <linux/list.h>
  18#include <linux/pci.h>
  19#include <linux/platform_device.h>
  20#include <linux/slab.h>
  21#include <linux/dma-map-ops.h>
 
  22
  23#define IORT_TYPE_MASK(type)	(1 << (type))
  24#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
  25#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
  26				(1 << ACPI_IORT_NODE_SMMU_V3))
  27
  28struct iort_its_msi_chip {
  29	struct list_head	list;
  30	struct fwnode_handle	*fw_node;
  31	phys_addr_t		base_addr;
  32	u32			translation_id;
  33};
  34
  35struct iort_fwnode {
  36	struct list_head list;
  37	struct acpi_iort_node *iort_node;
  38	struct fwnode_handle *fwnode;
  39};
  40static LIST_HEAD(iort_fwnode_list);
  41static DEFINE_SPINLOCK(iort_fwnode_lock);
  42
  43/**
  44 * iort_set_fwnode() - Create iort_fwnode and use it to register
  45 *		       iommu data in the iort_fwnode_list
  46 *
  47 * @iort_node: IORT table node associated with the IOMMU
  48 * @fwnode: fwnode associated with the IORT node
  49 *
  50 * Returns: 0 on success
  51 *          <0 on failure
  52 */
  53static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
  54				  struct fwnode_handle *fwnode)
  55{
  56	struct iort_fwnode *np;
  57
  58	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
  59
  60	if (WARN_ON(!np))
  61		return -ENOMEM;
  62
  63	INIT_LIST_HEAD(&np->list);
  64	np->iort_node = iort_node;
  65	np->fwnode = fwnode;
  66
  67	spin_lock(&iort_fwnode_lock);
  68	list_add_tail(&np->list, &iort_fwnode_list);
  69	spin_unlock(&iort_fwnode_lock);
  70
  71	return 0;
  72}
  73
  74/**
  75 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
  76 *
  77 * @node: IORT table node to be looked-up
  78 *
  79 * Returns: fwnode_handle pointer on success, NULL on failure
  80 */
  81static inline struct fwnode_handle *iort_get_fwnode(
  82			struct acpi_iort_node *node)
  83{
  84	struct iort_fwnode *curr;
  85	struct fwnode_handle *fwnode = NULL;
  86
  87	spin_lock(&iort_fwnode_lock);
  88	list_for_each_entry(curr, &iort_fwnode_list, list) {
  89		if (curr->iort_node == node) {
  90			fwnode = curr->fwnode;
  91			break;
  92		}
  93	}
  94	spin_unlock(&iort_fwnode_lock);
  95
  96	return fwnode;
  97}
  98
  99/**
 100 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 101 *
 102 * @node: IORT table node associated with fwnode to delete
 103 */
 104static inline void iort_delete_fwnode(struct acpi_iort_node *node)
 105{
 106	struct iort_fwnode *curr, *tmp;
 107
 108	spin_lock(&iort_fwnode_lock);
 109	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
 110		if (curr->iort_node == node) {
 111			list_del(&curr->list);
 112			kfree(curr);
 113			break;
 114		}
 115	}
 116	spin_unlock(&iort_fwnode_lock);
 117}
 118
 119/**
 120 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 121 *
 122 * @fwnode: fwnode associated with device to be looked-up
 123 *
 124 * Returns: iort_node pointer on success, NULL on failure
 125 */
 126static inline struct acpi_iort_node *iort_get_iort_node(
 127			struct fwnode_handle *fwnode)
 128{
 129	struct iort_fwnode *curr;
 130	struct acpi_iort_node *iort_node = NULL;
 131
 132	spin_lock(&iort_fwnode_lock);
 133	list_for_each_entry(curr, &iort_fwnode_list, list) {
 134		if (curr->fwnode == fwnode) {
 135			iort_node = curr->iort_node;
 136			break;
 137		}
 138	}
 139	spin_unlock(&iort_fwnode_lock);
 140
 141	return iort_node;
 142}
 143
 144typedef acpi_status (*iort_find_node_callback)
 145	(struct acpi_iort_node *node, void *context);
 146
 147/* Root pointer to the mapped IORT table */
 148static struct acpi_table_header *iort_table;
 149
 150static LIST_HEAD(iort_msi_chip_list);
 151static DEFINE_SPINLOCK(iort_msi_chip_lock);
 152
 153/**
 154 * iort_register_domain_token() - register domain token along with related
 155 * ITS ID and base address to the list from where we can get it back later on.
 156 * @trans_id: ITS ID.
 157 * @base: ITS base address.
 158 * @fw_node: Domain token.
 159 *
 160 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 161 */
 162int iort_register_domain_token(int trans_id, phys_addr_t base,
 163			       struct fwnode_handle *fw_node)
 164{
 165	struct iort_its_msi_chip *its_msi_chip;
 166
 167	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
 168	if (!its_msi_chip)
 169		return -ENOMEM;
 170
 171	its_msi_chip->fw_node = fw_node;
 172	its_msi_chip->translation_id = trans_id;
 173	its_msi_chip->base_addr = base;
 174
 175	spin_lock(&iort_msi_chip_lock);
 176	list_add(&its_msi_chip->list, &iort_msi_chip_list);
 177	spin_unlock(&iort_msi_chip_lock);
 178
 179	return 0;
 180}
 181
 182/**
 183 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 184 * @trans_id: ITS ID.
 185 *
 186 * Returns: none.
 187 */
 188void iort_deregister_domain_token(int trans_id)
 189{
 190	struct iort_its_msi_chip *its_msi_chip, *t;
 191
 192	spin_lock(&iort_msi_chip_lock);
 193	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
 194		if (its_msi_chip->translation_id == trans_id) {
 195			list_del(&its_msi_chip->list);
 196			kfree(its_msi_chip);
 197			break;
 198		}
 199	}
 200	spin_unlock(&iort_msi_chip_lock);
 201}
 202
 203/**
 204 * iort_find_domain_token() - Find domain token based on given ITS ID
 205 * @trans_id: ITS ID.
 206 *
 207 * Returns: domain token when find on the list, NULL otherwise
 208 */
 209struct fwnode_handle *iort_find_domain_token(int trans_id)
 210{
 211	struct fwnode_handle *fw_node = NULL;
 212	struct iort_its_msi_chip *its_msi_chip;
 213
 214	spin_lock(&iort_msi_chip_lock);
 215	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
 216		if (its_msi_chip->translation_id == trans_id) {
 217			fw_node = its_msi_chip->fw_node;
 218			break;
 219		}
 220	}
 221	spin_unlock(&iort_msi_chip_lock);
 222
 223	return fw_node;
 224}
 225
 226static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
 227					     iort_find_node_callback callback,
 228					     void *context)
 229{
 230	struct acpi_iort_node *iort_node, *iort_end;
 231	struct acpi_table_iort *iort;
 232	int i;
 233
 234	if (!iort_table)
 235		return NULL;
 236
 237	/* Get the first IORT node */
 238	iort = (struct acpi_table_iort *)iort_table;
 239	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
 240				 iort->node_offset);
 241	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
 242				iort_table->length);
 243
 244	for (i = 0; i < iort->node_count; i++) {
 245		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
 246			       "IORT node pointer overflows, bad table!\n"))
 247			return NULL;
 248
 249		if (iort_node->type == type &&
 250		    ACPI_SUCCESS(callback(iort_node, context)))
 251			return iort_node;
 252
 253		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
 254					 iort_node->length);
 255	}
 256
 257	return NULL;
 258}
 259
 260static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
 261					    void *context)
 262{
 263	struct device *dev = context;
 264	acpi_status status = AE_NOT_FOUND;
 265
 266	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
 267		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
 268		struct acpi_device *adev;
 269		struct acpi_iort_named_component *ncomp;
 270		struct device *nc_dev = dev;
 271
 272		/*
 273		 * Walk the device tree to find a device with an
 274		 * ACPI companion; there is no point in scanning
 275		 * IORT for a device matching a named component if
 276		 * the device does not have an ACPI companion to
 277		 * start with.
 278		 */
 279		do {
 280			adev = ACPI_COMPANION(nc_dev);
 281			if (adev)
 282				break;
 283
 284			nc_dev = nc_dev->parent;
 285		} while (nc_dev);
 286
 287		if (!adev)
 288			goto out;
 289
 290		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
 291		if (ACPI_FAILURE(status)) {
 292			dev_warn(nc_dev, "Can't get device full path name\n");
 293			goto out;
 294		}
 295
 296		ncomp = (struct acpi_iort_named_component *)node->node_data;
 297		status = !strcmp(ncomp->device_name, buf.pointer) ?
 298							AE_OK : AE_NOT_FOUND;
 299		acpi_os_free(buf.pointer);
 300	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
 301		struct acpi_iort_root_complex *pci_rc;
 302		struct pci_bus *bus;
 303
 304		bus = to_pci_bus(dev);
 305		pci_rc = (struct acpi_iort_root_complex *)node->node_data;
 306
 307		/*
 308		 * It is assumed that PCI segment numbers maps one-to-one
 309		 * with root complexes. Each segment number can represent only
 310		 * one root complex.
 311		 */
 312		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
 313							AE_OK : AE_NOT_FOUND;
 314	}
 315out:
 316	return status;
 317}
 318
 319static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
 320		       u32 *rid_out, bool check_overlap)
 321{
 322	/* Single mapping does not care for input id */
 323	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
 324		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
 325		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
 326			*rid_out = map->output_base;
 327			return 0;
 328		}
 329
 330		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
 331			map, type);
 332		return -ENXIO;
 333	}
 334
 335	if (rid_in < map->input_base ||
 336	    (rid_in > map->input_base + map->id_count))
 337		return -ENXIO;
 338
 339	if (check_overlap) {
 340		/*
 341		 * We already found a mapping for this input ID at the end of
 342		 * another region. If it coincides with the start of this
 343		 * region, we assume the prior match was due to the off-by-1
 344		 * issue mentioned below, and allow it to be superseded.
 345		 * Otherwise, things are *really* broken, and we just disregard
 346		 * duplicate matches entirely to retain compatibility.
 347		 */
 348		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
 349		       map, rid_in);
 350		if (rid_in != map->input_base)
 351			return -ENXIO;
 352
 353		pr_err(FW_BUG "applying workaround.\n");
 354	}
 355
 356	*rid_out = map->output_base + (rid_in - map->input_base);
 357
 358	/*
 359	 * Due to confusion regarding the meaning of the id_count field (which
 360	 * carries the number of IDs *minus 1*), we may have to disregard this
 361	 * match if it is at the end of the range, and overlaps with the start
 362	 * of another one.
 363	 */
 364	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
 365		return -EAGAIN;
 366	return 0;
 367}
 368
 369static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
 370					       u32 *id_out, int index)
 371{
 372	struct acpi_iort_node *parent;
 373	struct acpi_iort_id_mapping *map;
 374
 375	if (!node->mapping_offset || !node->mapping_count ||
 376				     index >= node->mapping_count)
 377		return NULL;
 378
 379	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
 380			   node->mapping_offset + index * sizeof(*map));
 381
 382	/* Firmware bug! */
 383	if (!map->output_reference) {
 384		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
 385		       node, node->type);
 386		return NULL;
 387	}
 388
 389	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
 390			       map->output_reference);
 391
 392	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
 393		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
 394		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
 395		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
 396		    node->type == ACPI_IORT_NODE_PMCG) {
 397			*id_out = map->output_base;
 398			return parent;
 399		}
 400	}
 401
 402	return NULL;
 403}
 404
 405#ifndef ACPI_IORT_SMMU_V3_DEVICEID_VALID
 406#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1 << 4)
 407#endif
 408
 409static int iort_get_id_mapping_index(struct acpi_iort_node *node)
 410{
 411	struct acpi_iort_smmu_v3 *smmu;
 412	struct acpi_iort_pmcg *pmcg;
 413
 414	switch (node->type) {
 415	case ACPI_IORT_NODE_SMMU_V3:
 416		/*
 417		 * SMMUv3 dev ID mapping index was introduced in revision 1
 418		 * table, not available in revision 0
 419		 */
 420		if (node->revision < 1)
 421			return -EINVAL;
 422
 423		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
 424		/*
 425		 * Until IORT E.e (node rev. 5), the ID mapping index was
 426		 * defined to be valid unless all interrupts are GSIV-based.
 427		 */
 428		if (node->revision < 5) {
 429			if (smmu->event_gsiv && smmu->pri_gsiv &&
 430			    smmu->gerr_gsiv && smmu->sync_gsiv)
 431				return -EINVAL;
 432		} else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) {
 433			return -EINVAL;
 434		}
 435
 436		if (smmu->id_mapping_index >= node->mapping_count) {
 437			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
 438			       node, node->type);
 439			return -EINVAL;
 440		}
 441
 442		return smmu->id_mapping_index;
 443	case ACPI_IORT_NODE_PMCG:
 444		pmcg = (struct acpi_iort_pmcg *)node->node_data;
 445		if (pmcg->overflow_gsiv || node->mapping_count == 0)
 446			return -EINVAL;
 447
 448		return 0;
 449	default:
 450		return -EINVAL;
 451	}
 452}
 453
 454static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
 455					       u32 id_in, u32 *id_out,
 456					       u8 type_mask)
 457{
 458	u32 id = id_in;
 459
 460	/* Parse the ID mapping tree to find specified node type */
 461	while (node) {
 462		struct acpi_iort_id_mapping *map;
 463		int i, index, rc = 0;
 464		u32 out_ref = 0, map_id = id;
 465
 466		if (IORT_TYPE_MASK(node->type) & type_mask) {
 467			if (id_out)
 468				*id_out = id;
 469			return node;
 470		}
 471
 472		if (!node->mapping_offset || !node->mapping_count)
 473			goto fail_map;
 474
 475		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
 476				   node->mapping_offset);
 477
 478		/* Firmware bug! */
 479		if (!map->output_reference) {
 480			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
 481			       node, node->type);
 482			goto fail_map;
 483		}
 484
 485		/*
 486		 * Get the special ID mapping index (if any) and skip its
 487		 * associated ID map to prevent erroneous multi-stage
 488		 * IORT ID translations.
 489		 */
 490		index = iort_get_id_mapping_index(node);
 491
 492		/* Do the ID translation */
 493		for (i = 0; i < node->mapping_count; i++, map++) {
 494			/* if it is special mapping index, skip it */
 495			if (i == index)
 496				continue;
 497
 498			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
 499			if (!rc)
 500				break;
 501			if (rc == -EAGAIN)
 502				out_ref = map->output_reference;
 503		}
 504
 505		if (i == node->mapping_count && !out_ref)
 506			goto fail_map;
 507
 508		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
 509				    rc ? out_ref : map->output_reference);
 510	}
 511
 512fail_map:
 513	/* Map input ID to output ID unchanged on mapping failure */
 514	if (id_out)
 515		*id_out = id_in;
 516
 517	return NULL;
 518}
 519
 520static struct acpi_iort_node *iort_node_map_platform_id(
 521		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
 522		int index)
 523{
 524	struct acpi_iort_node *parent;
 525	u32 id;
 526
 527	/* step 1: retrieve the initial dev id */
 528	parent = iort_node_get_id(node, &id, index);
 529	if (!parent)
 530		return NULL;
 531
 532	/*
 533	 * optional step 2: map the initial dev id if its parent is not
 534	 * the target type we want, map it again for the use cases such
 535	 * as NC (named component) -> SMMU -> ITS. If the type is matched,
 536	 * return the initial dev id and its parent pointer directly.
 537	 */
 538	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
 539		parent = iort_node_map_id(parent, id, id_out, type_mask);
 540	else
 541		if (id_out)
 542			*id_out = id;
 543
 544	return parent;
 545}
 546
 547static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
 548{
 549	struct pci_bus *pbus;
 550
 551	if (!dev_is_pci(dev)) {
 552		struct acpi_iort_node *node;
 553		/*
 554		 * scan iort_fwnode_list to see if it's an iort platform
 555		 * device (such as SMMU, PMCG),its iort node already cached
 556		 * and associated with fwnode when iort platform devices
 557		 * were initialized.
 558		 */
 559		node = iort_get_iort_node(dev->fwnode);
 560		if (node)
 561			return node;
 562		/*
 563		 * if not, then it should be a platform device defined in
 564		 * DSDT/SSDT (with Named Component node in IORT)
 565		 */
 566		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
 567				      iort_match_node_callback, dev);
 568	}
 569
 570	pbus = to_pci_dev(dev)->bus;
 571
 572	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
 573			      iort_match_node_callback, &pbus->dev);
 574}
 575
 576/**
 577 * iort_msi_map_id() - Map a MSI input ID for a device
 578 * @dev: The device for which the mapping is to be done.
 579 * @input_id: The device input ID.
 580 *
 581 * Returns: mapped MSI ID on success, input ID otherwise
 582 */
 583u32 iort_msi_map_id(struct device *dev, u32 input_id)
 584{
 585	struct acpi_iort_node *node;
 586	u32 dev_id;
 587
 588	node = iort_find_dev_node(dev);
 589	if (!node)
 590		return input_id;
 591
 592	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
 593	return dev_id;
 594}
 595
 596/**
 597 * iort_pmsi_get_dev_id() - Get the device id for a device
 598 * @dev: The device for which the mapping is to be done.
 599 * @dev_id: The device ID found.
 600 *
 601 * Returns: 0 for successful find a dev id, -ENODEV on error
 602 */
 603int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
 604{
 605	int i, index;
 606	struct acpi_iort_node *node;
 607
 608	node = iort_find_dev_node(dev);
 609	if (!node)
 610		return -ENODEV;
 611
 612	index = iort_get_id_mapping_index(node);
 613	/* if there is a valid index, go get the dev_id directly */
 614	if (index >= 0) {
 615		if (iort_node_get_id(node, dev_id, index))
 616			return 0;
 617	} else {
 618		for (i = 0; i < node->mapping_count; i++) {
 619			if (iort_node_map_platform_id(node, dev_id,
 620						      IORT_MSI_TYPE, i))
 621				return 0;
 622		}
 623	}
 624
 625	return -ENODEV;
 626}
 627
 628static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
 629{
 630	struct iort_its_msi_chip *its_msi_chip;
 631	int ret = -ENODEV;
 632
 633	spin_lock(&iort_msi_chip_lock);
 634	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
 635		if (its_msi_chip->translation_id == its_id) {
 636			*base = its_msi_chip->base_addr;
 637			ret = 0;
 638			break;
 639		}
 640	}
 641	spin_unlock(&iort_msi_chip_lock);
 642
 643	return ret;
 644}
 645
 646/**
 647 * iort_dev_find_its_id() - Find the ITS identifier for a device
 648 * @dev: The device.
 649 * @id: Device's ID
 650 * @idx: Index of the ITS identifier list.
 651 * @its_id: ITS identifier.
 652 *
 653 * Returns: 0 on success, appropriate error value otherwise
 654 */
 655static int iort_dev_find_its_id(struct device *dev, u32 id,
 656				unsigned int idx, int *its_id)
 657{
 658	struct acpi_iort_its_group *its;
 659	struct acpi_iort_node *node;
 660
 661	node = iort_find_dev_node(dev);
 662	if (!node)
 663		return -ENXIO;
 664
 665	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
 666	if (!node)
 667		return -ENXIO;
 668
 669	/* Move to ITS specific data */
 670	its = (struct acpi_iort_its_group *)node->node_data;
 671	if (idx >= its->its_count) {
 672		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
 673			idx, its->its_count);
 674		return -ENXIO;
 675	}
 676
 677	*its_id = its->identifiers[idx];
 678	return 0;
 679}
 680
 681/**
 682 * iort_get_device_domain() - Find MSI domain related to a device
 683 * @dev: The device.
 684 * @id: Requester ID for the device.
 685 * @bus_token: irq domain bus token.
 686 *
 687 * Returns: the MSI domain for this device, NULL otherwise
 688 */
 689struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
 690					  enum irq_domain_bus_token bus_token)
 691{
 692	struct fwnode_handle *handle;
 693	int its_id;
 694
 695	if (iort_dev_find_its_id(dev, id, 0, &its_id))
 696		return NULL;
 697
 698	handle = iort_find_domain_token(its_id);
 699	if (!handle)
 700		return NULL;
 701
 702	return irq_find_matching_fwnode(handle, bus_token);
 703}
 704
 705static void iort_set_device_domain(struct device *dev,
 706				   struct acpi_iort_node *node)
 707{
 708	struct acpi_iort_its_group *its;
 709	struct acpi_iort_node *msi_parent;
 710	struct acpi_iort_id_mapping *map;
 711	struct fwnode_handle *iort_fwnode;
 712	struct irq_domain *domain;
 713	int index;
 714
 715	index = iort_get_id_mapping_index(node);
 716	if (index < 0)
 717		return;
 718
 719	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
 720			   node->mapping_offset + index * sizeof(*map));
 721
 722	/* Firmware bug! */
 723	if (!map->output_reference ||
 724	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
 725		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
 726		       node, node->type);
 727		return;
 728	}
 729
 730	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
 731				  map->output_reference);
 732
 733	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
 734		return;
 735
 736	/* Move to ITS specific data */
 737	its = (struct acpi_iort_its_group *)msi_parent->node_data;
 738
 739	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
 740	if (!iort_fwnode)
 741		return;
 742
 743	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
 744	if (domain)
 745		dev_set_msi_domain(dev, domain);
 746}
 747
 748/**
 749 * iort_get_platform_device_domain() - Find MSI domain related to a
 750 * platform device
 751 * @dev: the dev pointer associated with the platform device
 752 *
 753 * Returns: the MSI domain for this device, NULL otherwise
 754 */
 755static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
 756{
 757	struct acpi_iort_node *node, *msi_parent = NULL;
 758	struct fwnode_handle *iort_fwnode;
 759	struct acpi_iort_its_group *its;
 760	int i;
 761
 762	/* find its associated iort node */
 763	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
 764			      iort_match_node_callback, dev);
 765	if (!node)
 766		return NULL;
 767
 768	/* then find its msi parent node */
 769	for (i = 0; i < node->mapping_count; i++) {
 770		msi_parent = iort_node_map_platform_id(node, NULL,
 771						       IORT_MSI_TYPE, i);
 772		if (msi_parent)
 773			break;
 774	}
 775
 776	if (!msi_parent)
 777		return NULL;
 778
 779	/* Move to ITS specific data */
 780	its = (struct acpi_iort_its_group *)msi_parent->node_data;
 781
 782	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
 783	if (!iort_fwnode)
 784		return NULL;
 785
 786	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
 787}
 788
 789void acpi_configure_pmsi_domain(struct device *dev)
 790{
 791	struct irq_domain *msi_domain;
 792
 793	msi_domain = iort_get_platform_device_domain(dev);
 794	if (msi_domain)
 795		dev_set_msi_domain(dev, msi_domain);
 796}
 797
 798#ifdef CONFIG_IOMMU_API
 799static void iort_rmr_free(struct device *dev,
 800			  struct iommu_resv_region *region)
 801{
 802	struct iommu_iort_rmr_data *rmr_data;
 803
 804	rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
 805	kfree(rmr_data->sids);
 806	kfree(rmr_data);
 807}
 808
 809static struct iommu_iort_rmr_data *iort_rmr_alloc(
 810					struct acpi_iort_rmr_desc *rmr_desc,
 811					int prot, enum iommu_resv_type type,
 812					u32 *sids, u32 num_sids)
 813{
 814	struct iommu_iort_rmr_data *rmr_data;
 815	struct iommu_resv_region *region;
 816	u32 *sids_copy;
 817	u64 addr = rmr_desc->base_address, size = rmr_desc->length;
 818
 819	rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
 820	if (!rmr_data)
 821		return NULL;
 822
 823	/* Create a copy of SIDs array to associate with this rmr_data */
 824	sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
 825	if (!sids_copy) {
 826		kfree(rmr_data);
 827		return NULL;
 828	}
 829	rmr_data->sids = sids_copy;
 830	rmr_data->num_sids = num_sids;
 831
 832	if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
 833		/* PAGE align base addr and size */
 834		addr &= PAGE_MASK;
 835		size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
 836
 837		pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
 838		       rmr_desc->base_address,
 839		       rmr_desc->base_address + rmr_desc->length - 1,
 840		       addr, addr + size - 1);
 841	}
 842
 843	region = &rmr_data->rr;
 844	INIT_LIST_HEAD(&region->list);
 845	region->start = addr;
 846	region->length = size;
 847	region->prot = prot;
 848	region->type = type;
 849	region->free = iort_rmr_free;
 850
 851	return rmr_data;
 852}
 853
 854static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
 855					u32 count)
 856{
 857	int i, j;
 858
 859	for (i = 0; i < count; i++) {
 860		u64 end, start = desc[i].base_address, length = desc[i].length;
 861
 862		if (!length) {
 863			pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
 864			       start);
 865			continue;
 866		}
 867
 868		end = start + length - 1;
 869
 870		/* Check for address overlap */
 871		for (j = i + 1; j < count; j++) {
 872			u64 e_start = desc[j].base_address;
 873			u64 e_end = e_start + desc[j].length - 1;
 874
 875			if (start <= e_end && end >= e_start)
 876				pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
 877				       start, end);
 878		}
 879	}
 880}
 881
 882/*
 883 * Please note, we will keep the already allocated RMR reserve
 884 * regions in case of a memory allocation failure.
 885 */
 886static void iort_get_rmrs(struct acpi_iort_node *node,
 887			  struct acpi_iort_node *smmu,
 888			  u32 *sids, u32 num_sids,
 889			  struct list_head *head)
 890{
 891	struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
 892	struct acpi_iort_rmr_desc *rmr_desc;
 893	int i;
 894
 895	rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
 896				rmr->rmr_offset);
 897
 898	iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
 899
 900	for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
 901		struct iommu_iort_rmr_data *rmr_data;
 902		enum iommu_resv_type type;
 903		int prot = IOMMU_READ | IOMMU_WRITE;
 904
 905		if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
 906			type = IOMMU_RESV_DIRECT_RELAXABLE;
 907		else
 908			type = IOMMU_RESV_DIRECT;
 909
 910		if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
 911			prot |= IOMMU_PRIV;
 912
 913		/* Attributes 0x00 - 0x03 represents device memory */
 914		if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
 915				ACPI_IORT_RMR_ATTR_DEVICE_GRE)
 916			prot |= IOMMU_MMIO;
 917		else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
 918				ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
 919			prot |= IOMMU_CACHE;
 920
 921		rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
 922					  sids, num_sids);
 923		if (!rmr_data)
 924			return;
 925
 926		list_add_tail(&rmr_data->rr.list, head);
 927	}
 928}
 929
 930static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
 931				u32 new_count)
 932{
 933	u32 *new_sids;
 934	u32 total_count = count + new_count;
 935	int i;
 936
 937	new_sids = krealloc_array(sids, count + new_count,
 938				  sizeof(*new_sids), GFP_KERNEL);
 939	if (!new_sids)
 940		return NULL;
 941
 942	for (i = count; i < total_count; i++)
 943		new_sids[i] = id_start++;
 944
 945	return new_sids;
 946}
 947
 948static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
 949			     u32 id_count)
 950{
 951	int i;
 952	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 953
 954	/*
 955	 * Make sure the kernel has preserved the boot firmware PCIe
 956	 * configuration. This is required to ensure that the RMR PCIe
 957	 * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
 958	 */
 959	if (dev_is_pci(dev)) {
 960		struct pci_dev *pdev = to_pci_dev(dev);
 961		struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
 962
 963		if (!host->preserve_config)
 964			return false;
 965	}
 966
 967	for (i = 0; i < fwspec->num_ids; i++) {
 968		if (fwspec->ids[i] >= id_start &&
 969		    fwspec->ids[i] <= id_start + id_count)
 970			return true;
 971	}
 972
 973	return false;
 974}
 975
 976static void iort_node_get_rmr_info(struct acpi_iort_node *node,
 977				   struct acpi_iort_node *iommu,
 978				   struct device *dev, struct list_head *head)
 979{
 980	struct acpi_iort_node *smmu = NULL;
 981	struct acpi_iort_rmr *rmr;
 982	struct acpi_iort_id_mapping *map;
 983	u32 *sids = NULL;
 984	u32 num_sids = 0;
 985	int i;
 986
 987	if (!node->mapping_offset || !node->mapping_count) {
 988		pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
 989		       node);
 990		return;
 991	}
 992
 993	rmr = (struct acpi_iort_rmr *)node->node_data;
 994	if (!rmr->rmr_offset || !rmr->rmr_count)
 995		return;
 996
 997	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
 998			   node->mapping_offset);
 999
1000	/*
1001	 * Go through the ID mappings and see if we have a match for SMMU
1002	 * and dev(if !NULL). If found, get the sids for the Node.
1003	 * Please note, id_count is equal to the number of IDs  in the
1004	 * range minus one.
1005	 */
1006	for (i = 0; i < node->mapping_count; i++, map++) {
1007		struct acpi_iort_node *parent;
1008
1009		if (!map->id_count)
1010			continue;
1011
1012		parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
1013				      map->output_reference);
1014		if (parent != iommu)
1015			continue;
1016
1017		/* If dev is valid, check RMR node corresponds to the dev SID */
1018		if (dev && !iort_rmr_has_dev(dev, map->output_base,
1019					     map->id_count))
1020			continue;
1021
1022		/* Retrieve SIDs associated with the Node. */
1023		sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
1024					   map->id_count + 1);
1025		if (!sids)
1026			return;
1027
1028		num_sids += map->id_count + 1;
1029	}
1030
1031	if (!sids)
1032		return;
1033
1034	iort_get_rmrs(node, smmu, sids, num_sids, head);
1035	kfree(sids);
1036}
1037
1038static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
1039			   struct list_head *head)
1040{
1041	struct acpi_table_iort *iort;
1042	struct acpi_iort_node *iort_node, *iort_end;
1043	int i;
1044
1045	/* Only supports ARM DEN 0049E.d onwards */
1046	if (iort_table->revision < 5)
1047		return;
1048
1049	iort = (struct acpi_table_iort *)iort_table;
1050
1051	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1052				 iort->node_offset);
1053	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1054				iort_table->length);
1055
1056	for (i = 0; i < iort->node_count; i++) {
1057		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
1058			       "IORT node pointer overflows, bad table!\n"))
1059			return;
1060
1061		if (iort_node->type == ACPI_IORT_NODE_RMR)
1062			iort_node_get_rmr_info(iort_node, iommu, dev, head);
1063
1064		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
1065					 iort_node->length);
1066	}
1067}
1068
1069/*
1070 * Populate the RMR list associated with a given IOMMU and dev(if provided).
1071 * If dev is NULL, the function populates all the RMRs associated with the
1072 * given IOMMU.
1073 */
1074static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
1075					    struct device *dev,
1076					    struct list_head *head)
1077{
1078	struct acpi_iort_node *iommu;
1079
1080	iommu = iort_get_iort_node(iommu_fwnode);
1081	if (!iommu)
1082		return;
1083
1084	iort_find_rmrs(iommu, dev, head);
1085}
1086
1087static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
1088{
1089	struct acpi_iort_node *iommu;
1090	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1091
1092	iommu = iort_get_iort_node(fwspec->iommu_fwnode);
1093
1094	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
1095		struct acpi_iort_smmu_v3 *smmu;
1096
1097		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
1098		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
1099			return iommu;
1100	}
1101
1102	return NULL;
1103}
1104
1105/*
1106 * Retrieve platform specific HW MSI reserve regions.
1107 * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
1108 * associated with the device are the HW MSI reserved regions.
1109 */
1110static void iort_iommu_msi_get_resv_regions(struct device *dev,
1111					    struct list_head *head)
1112{
1113	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1114	struct acpi_iort_its_group *its;
1115	struct acpi_iort_node *iommu_node, *its_node = NULL;
1116	int i;
1117
1118	iommu_node = iort_get_msi_resv_iommu(dev);
1119	if (!iommu_node)
1120		return;
1121
1122	/*
1123	 * Current logic to reserve ITS regions relies on HW topologies
1124	 * where a given PCI or named component maps its IDs to only one
1125	 * ITS group; if a PCI or named component can map its IDs to
1126	 * different ITS groups through IORT mappings this function has
1127	 * to be reworked to ensure we reserve regions for all ITS groups
1128	 * a given PCI or named component may map IDs to.
1129	 */
1130
1131	for (i = 0; i < fwspec->num_ids; i++) {
1132		its_node = iort_node_map_id(iommu_node,
1133					fwspec->ids[i],
1134					NULL, IORT_MSI_TYPE);
1135		if (its_node)
1136			break;
1137	}
1138
1139	if (!its_node)
1140		return;
1141
1142	/* Move to ITS specific data */
1143	its = (struct acpi_iort_its_group *)its_node->node_data;
1144
1145	for (i = 0; i < its->its_count; i++) {
1146		phys_addr_t base;
1147
1148		if (!iort_find_its_base(its->identifiers[i], &base)) {
1149			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1150			struct iommu_resv_region *region;
1151
1152			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
1153							 prot, IOMMU_RESV_MSI,
1154							 GFP_KERNEL);
1155			if (region)
1156				list_add_tail(&region->list, head);
1157		}
1158	}
1159}
1160
1161/**
1162 * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
1163 * @dev: Device from iommu_get_resv_regions()
1164 * @head: Reserved region list from iommu_get_resv_regions()
1165 */
1166void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
1167{
1168	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1169
1170	iort_iommu_msi_get_resv_regions(dev, head);
1171	iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
1172}
1173
1174/**
1175 * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
1176 *                     associated StreamIDs information.
1177 * @iommu_fwnode: fwnode associated with IOMMU
1178 * @head: Resereved region list
1179 */
1180void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
1181		       struct list_head *head)
1182{
1183	iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
1184}
1185EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
1186
1187/**
1188 * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
1189 * @iommu_fwnode: fwnode associated with IOMMU
1190 * @head: Resereved region list
1191 */
1192void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
1193		       struct list_head *head)
1194{
1195	struct iommu_resv_region *entry, *next;
1196
1197	list_for_each_entry_safe(entry, next, head, list)
1198		entry->free(NULL, entry);
1199}
1200EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
1201
1202static inline bool iort_iommu_driver_enabled(u8 type)
1203{
1204	switch (type) {
1205	case ACPI_IORT_NODE_SMMU_V3:
1206		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
1207	case ACPI_IORT_NODE_SMMU:
1208		return IS_ENABLED(CONFIG_ARM_SMMU);
1209	default:
1210		pr_warn("IORT node type %u does not describe an SMMU\n", type);
1211		return false;
1212	}
1213}
1214
1215static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
1216{
1217	struct acpi_iort_root_complex *pci_rc;
1218
1219	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
1220	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
1221}
1222
 
 
 
 
 
 
 
 
 
 
 
1223static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
1224			    u32 streamid)
1225{
1226	const struct iommu_ops *ops;
1227	struct fwnode_handle *iort_fwnode;
1228
1229	if (!node)
 
1230		return -ENODEV;
1231
1232	iort_fwnode = iort_get_fwnode(node);
1233	if (!iort_fwnode)
1234		return -ENODEV;
1235
1236	/*
1237	 * If the ops look-up fails, this means that either
1238	 * the SMMU drivers have not been probed yet or that
1239	 * the SMMU drivers are not built in the kernel;
1240	 * Depending on whether the SMMU drivers are built-in
1241	 * in the kernel or not, defer the IOMMU configuration
1242	 * or just abort it.
1243	 */
1244	ops = iommu_ops_from_fwnode(iort_fwnode);
1245	if (!ops)
1246		return iort_iommu_driver_enabled(node->type) ?
1247		       -EPROBE_DEFER : -ENODEV;
1248
1249	return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
1250}
1251
1252struct iort_pci_alias_info {
1253	struct device *dev;
1254	struct acpi_iort_node *node;
1255};
1256
1257static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
1258{
1259	struct iort_pci_alias_info *info = data;
1260	struct acpi_iort_node *parent;
1261	u32 streamid;
1262
1263	parent = iort_node_map_id(info->node, alias, &streamid,
1264				  IORT_IOMMU_TYPE);
1265	return iort_iommu_xlate(info->dev, parent, streamid);
1266}
1267
1268static void iort_named_component_init(struct device *dev,
1269				      struct acpi_iort_node *node)
1270{
1271	struct property_entry props[3] = {};
1272	struct acpi_iort_named_component *nc;
1273
1274	nc = (struct acpi_iort_named_component *)node->node_data;
1275	props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
1276				      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
1277						nc->node_flags));
1278	if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
1279		props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");
1280
1281	if (device_create_managed_software_node(dev, props, NULL))
1282		dev_warn(dev, "Could not add device properties\n");
1283}
1284
1285static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
1286{
1287	struct acpi_iort_node *parent;
1288	int err = -ENODEV, i = 0;
1289	u32 streamid = 0;
1290
1291	do {
1292
1293		parent = iort_node_map_platform_id(node, &streamid,
1294						   IORT_IOMMU_TYPE,
1295						   i++);
1296
1297		if (parent)
1298			err = iort_iommu_xlate(dev, parent, streamid);
1299	} while (parent && !err);
1300
1301	return err;
1302}
1303
1304static int iort_nc_iommu_map_id(struct device *dev,
1305				struct acpi_iort_node *node,
1306				const u32 *in_id)
1307{
1308	struct acpi_iort_node *parent;
1309	u32 streamid;
1310
1311	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
1312	if (parent)
1313		return iort_iommu_xlate(dev, parent, streamid);
1314
1315	return -ENODEV;
1316}
1317
1318
1319/**
1320 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
1321 *
1322 * @dev: device to configure
1323 * @id_in: optional input id const value pointer
1324 *
1325 * Returns: 0 on success, <0 on failure
1326 */
1327int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
1328{
1329	struct acpi_iort_node *node;
1330	int err = -ENODEV;
1331
1332	if (dev_is_pci(dev)) {
1333		struct iommu_fwspec *fwspec;
1334		struct pci_bus *bus = to_pci_dev(dev)->bus;
1335		struct iort_pci_alias_info info = { .dev = dev };
1336
1337		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1338				      iort_match_node_callback, &bus->dev);
1339		if (!node)
1340			return -ENODEV;
1341
1342		info.node = node;
1343		err = pci_for_each_dma_alias(to_pci_dev(dev),
1344					     iort_pci_iommu_init, &info);
1345
1346		fwspec = dev_iommu_fwspec_get(dev);
1347		if (fwspec && iort_pci_rc_supports_ats(node))
1348			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
 
 
1349	} else {
1350		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1351				      iort_match_node_callback, dev);
1352		if (!node)
1353			return -ENODEV;
1354
1355		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
1356			      iort_nc_iommu_map(dev, node);
1357
1358		if (!err)
1359			iort_named_component_init(dev, node);
1360	}
1361
1362	return err;
1363}
1364
1365#else
1366void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
1367{ }
1368int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
1369{ return -ENODEV; }
1370#endif
1371
1372static int nc_dma_get_range(struct device *dev, u64 *size)
1373{
1374	struct acpi_iort_node *node;
1375	struct acpi_iort_named_component *ncomp;
1376
1377	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
1378			      iort_match_node_callback, dev);
1379	if (!node)
1380		return -ENODEV;
1381
1382	ncomp = (struct acpi_iort_named_component *)node->node_data;
1383
1384	if (!ncomp->memory_address_limit) {
1385		pr_warn(FW_BUG "Named component missing memory address limit\n");
1386		return -EINVAL;
1387	}
1388
1389	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1390			1ULL<<ncomp->memory_address_limit;
1391
1392	return 0;
1393}
1394
1395static int rc_dma_get_range(struct device *dev, u64 *size)
1396{
1397	struct acpi_iort_node *node;
1398	struct acpi_iort_root_complex *rc;
1399	struct pci_bus *pbus = to_pci_dev(dev)->bus;
1400
1401	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
1402			      iort_match_node_callback, &pbus->dev);
1403	if (!node || node->revision < 1)
1404		return -ENODEV;
1405
1406	rc = (struct acpi_iort_root_complex *)node->node_data;
1407
1408	if (!rc->memory_address_limit) {
1409		pr_warn(FW_BUG "Root complex missing memory address limit\n");
1410		return -EINVAL;
1411	}
1412
1413	*size = rc->memory_address_limit >= 64 ? U64_MAX :
1414			1ULL<<rc->memory_address_limit;
1415
1416	return 0;
1417}
1418
1419/**
1420 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
1421 * @dev: device to lookup
1422 * @size: DMA range size result pointer
1423 *
1424 * Return: 0 on success, an error otherwise.
1425 */
1426int iort_dma_get_ranges(struct device *dev, u64 *size)
1427{
1428	if (dev_is_pci(dev))
1429		return rc_dma_get_range(dev, size);
1430	else
1431		return nc_dma_get_range(dev, size);
1432}
1433
1434static void __init acpi_iort_register_irq(int hwirq, const char *name,
1435					  int trigger,
1436					  struct resource *res)
1437{
1438	int irq = acpi_register_gsi(NULL, hwirq, trigger,
1439				    ACPI_ACTIVE_HIGH);
1440
1441	if (irq <= 0) {
1442		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
1443								      name);
1444		return;
1445	}
1446
1447	res->start = irq;
1448	res->end = irq;
1449	res->flags = IORESOURCE_IRQ;
1450	res->name = name;
1451}
1452
1453static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
1454{
1455	struct acpi_iort_smmu_v3 *smmu;
1456	/* Always present mem resource */
1457	int num_res = 1;
1458
1459	/* Retrieve SMMUv3 specific data */
1460	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1461
1462	if (smmu->event_gsiv)
1463		num_res++;
1464
1465	if (smmu->pri_gsiv)
1466		num_res++;
1467
1468	if (smmu->gerr_gsiv)
1469		num_res++;
1470
1471	if (smmu->sync_gsiv)
1472		num_res++;
1473
1474	return num_res;
1475}
1476
1477static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
1478{
1479	/*
1480	 * Cavium ThunderX2 implementation doesn't not support unique
1481	 * irq line. Use single irq line for all the SMMUv3 interrupts.
1482	 */
1483	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1484		return false;
1485
1486	/*
1487	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
1488	 * SPI numbers here.
1489	 */
1490	return smmu->event_gsiv == smmu->pri_gsiv &&
1491	       smmu->event_gsiv == smmu->gerr_gsiv &&
1492	       smmu->event_gsiv == smmu->sync_gsiv;
1493}
1494
1495static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
1496{
1497	/*
1498	 * Override the size, for Cavium ThunderX2 implementation
1499	 * which doesn't support the page 1 SMMU register space.
1500	 */
1501	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1502		return SZ_64K;
1503
1504	return SZ_128K;
1505}
1506
1507static void __init arm_smmu_v3_init_resources(struct resource *res,
1508					      struct acpi_iort_node *node)
1509{
1510	struct acpi_iort_smmu_v3 *smmu;
1511	int num_res = 0;
1512
1513	/* Retrieve SMMUv3 specific data */
1514	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1515
1516	res[num_res].start = smmu->base_address;
1517	res[num_res].end = smmu->base_address +
1518				arm_smmu_v3_resource_size(smmu) - 1;
1519	res[num_res].flags = IORESOURCE_MEM;
1520
1521	num_res++;
1522	if (arm_smmu_v3_is_combined_irq(smmu)) {
1523		if (smmu->event_gsiv)
1524			acpi_iort_register_irq(smmu->event_gsiv, "combined",
1525					       ACPI_EDGE_SENSITIVE,
1526					       &res[num_res++]);
1527	} else {
1528
1529		if (smmu->event_gsiv)
1530			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
1531					       ACPI_EDGE_SENSITIVE,
1532					       &res[num_res++]);
1533
1534		if (smmu->pri_gsiv)
1535			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
1536					       ACPI_EDGE_SENSITIVE,
1537					       &res[num_res++]);
1538
1539		if (smmu->gerr_gsiv)
1540			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
1541					       ACPI_EDGE_SENSITIVE,
1542					       &res[num_res++]);
1543
1544		if (smmu->sync_gsiv)
1545			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
1546					       ACPI_EDGE_SENSITIVE,
1547					       &res[num_res++]);
1548	}
1549}
1550
1551static void __init arm_smmu_v3_dma_configure(struct device *dev,
1552					     struct acpi_iort_node *node)
1553{
1554	struct acpi_iort_smmu_v3 *smmu;
1555	enum dev_dma_attr attr;
1556
1557	/* Retrieve SMMUv3 specific data */
1558	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1559
1560	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
1561			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1562
1563	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
1564	dev->dma_mask = &dev->coherent_dma_mask;
1565
1566	/* Configure DMA for the page table walker */
1567	acpi_dma_configure(dev, attr);
1568}
1569
1570#if defined(CONFIG_ACPI_NUMA)
1571/*
1572 * set numa proximity domain for smmuv3 device
1573 */
1574static int  __init arm_smmu_v3_set_proximity(struct device *dev,
1575					      struct acpi_iort_node *node)
1576{
1577	struct acpi_iort_smmu_v3 *smmu;
1578
1579	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1580	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1581		int dev_node = pxm_to_node(smmu->pxm);
1582
1583		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
1584			return -EINVAL;
1585
1586		set_dev_node(dev, dev_node);
1587		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1588			smmu->base_address,
1589			smmu->pxm);
1590	}
1591	return 0;
1592}
1593#else
1594#define arm_smmu_v3_set_proximity NULL
1595#endif
1596
1597static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
1598{
1599	struct acpi_iort_smmu *smmu;
1600
1601	/* Retrieve SMMU specific data */
1602	smmu = (struct acpi_iort_smmu *)node->node_data;
1603
1604	/*
1605	 * Only consider the global fault interrupt and ignore the
1606	 * configuration access interrupt.
1607	 *
1608	 * MMIO address and global fault interrupt resources are always
1609	 * present so add them to the context interrupt count as a static
1610	 * value.
1611	 */
1612	return smmu->context_interrupt_count + 2;
1613}
1614
1615static void __init arm_smmu_init_resources(struct resource *res,
1616					   struct acpi_iort_node *node)
1617{
1618	struct acpi_iort_smmu *smmu;
1619	int i, hw_irq, trigger, num_res = 0;
1620	u64 *ctx_irq, *glb_irq;
1621
1622	/* Retrieve SMMU specific data */
1623	smmu = (struct acpi_iort_smmu *)node->node_data;
1624
1625	res[num_res].start = smmu->base_address;
1626	res[num_res].end = smmu->base_address + smmu->span - 1;
1627	res[num_res].flags = IORESOURCE_MEM;
1628	num_res++;
1629
1630	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
1631	/* Global IRQs */
1632	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
1633	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
1634
1635	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
1636				     &res[num_res++]);
1637
1638	/* Context IRQs */
1639	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
1640	for (i = 0; i < smmu->context_interrupt_count; i++) {
1641		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
1642		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
1643
1644		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
1645				       &res[num_res++]);
1646	}
1647}
1648
1649static void __init arm_smmu_dma_configure(struct device *dev,
1650					  struct acpi_iort_node *node)
1651{
1652	struct acpi_iort_smmu *smmu;
1653	enum dev_dma_attr attr;
1654
1655	/* Retrieve SMMU specific data */
1656	smmu = (struct acpi_iort_smmu *)node->node_data;
1657
1658	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
1659			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1660
1661	/* We expect the dma masks to be equivalent for SMMU set-ups */
1662	dev->dma_mask = &dev->coherent_dma_mask;
1663
1664	/* Configure DMA for the page table walker */
1665	acpi_dma_configure(dev, attr);
1666}
1667
1668static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
1669{
1670	struct acpi_iort_pmcg *pmcg;
1671
1672	/* Retrieve PMCG specific data */
1673	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1674
1675	/*
1676	 * There are always 2 memory resources.
1677	 * If the overflow_gsiv is present then add that for a total of 3.
1678	 */
1679	return pmcg->overflow_gsiv ? 3 : 2;
1680}
1681
1682static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
1683						   struct acpi_iort_node *node)
1684{
1685	struct acpi_iort_pmcg *pmcg;
1686
1687	/* Retrieve PMCG specific data */
1688	pmcg = (struct acpi_iort_pmcg *)node->node_data;
1689
1690	res[0].start = pmcg->page0_base_address;
1691	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
1692	res[0].flags = IORESOURCE_MEM;
1693	/*
1694	 * The initial version in DEN0049C lacked a way to describe register
1695	 * page 1, which makes it broken for most PMCG implementations; in
1696	 * that case, just let the driver fail gracefully if it expects to
1697	 * find a second memory resource.
1698	 */
1699	if (node->revision > 0) {
1700		res[1].start = pmcg->page1_base_address;
1701		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
1702		res[1].flags = IORESOURCE_MEM;
1703	}
1704
1705	if (pmcg->overflow_gsiv)
1706		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
1707				       ACPI_EDGE_SENSITIVE, &res[2]);
1708}
1709
1710static struct acpi_platform_list pmcg_plat_info[] __initdata = {
1711	/* HiSilicon Hip08 Platform */
1712	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1713	 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
 
 
 
 
 
 
 
 
 
 
 
 
1714	{ }
1715};
1716
1717static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
1718{
1719	u32 model;
1720	int idx;
1721
1722	idx = acpi_match_platform_list(pmcg_plat_info);
1723	if (idx >= 0)
1724		model = pmcg_plat_info[idx].data;
1725	else
1726		model = IORT_SMMU_V3_PMCG_GENERIC;
1727
1728	return platform_device_add_data(pdev, &model, sizeof(model));
1729}
1730
1731struct iort_dev_config {
1732	const char *name;
1733	int (*dev_init)(struct acpi_iort_node *node);
1734	void (*dev_dma_configure)(struct device *dev,
1735				  struct acpi_iort_node *node);
1736	int (*dev_count_resources)(struct acpi_iort_node *node);
1737	void (*dev_init_resources)(struct resource *res,
1738				     struct acpi_iort_node *node);
1739	int (*dev_set_proximity)(struct device *dev,
1740				    struct acpi_iort_node *node);
1741	int (*dev_add_platdata)(struct platform_device *pdev);
1742};
1743
1744static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
1745	.name = "arm-smmu-v3",
1746	.dev_dma_configure = arm_smmu_v3_dma_configure,
1747	.dev_count_resources = arm_smmu_v3_count_resources,
1748	.dev_init_resources = arm_smmu_v3_init_resources,
1749	.dev_set_proximity = arm_smmu_v3_set_proximity,
1750};
1751
1752static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
1753	.name = "arm-smmu",
1754	.dev_dma_configure = arm_smmu_dma_configure,
1755	.dev_count_resources = arm_smmu_count_resources,
1756	.dev_init_resources = arm_smmu_init_resources,
1757};
1758
1759static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
1760	.name = "arm-smmu-v3-pmcg",
1761	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
1762	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
1763	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
1764};
1765
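/*
 * Return the iort_dev_config for IORT node types that are instantiated as
 * platform devices (SMMU, SMMUv3, PMCG); NULL for all other node types.
 */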
1766static __init const struct iort_dev_config *iort_get_dev_cfg(
1767			struct acpi_iort_node *node)
1768{
1769	switch (node->type) {
1770	case ACPI_IORT_NODE_SMMU_V3:
1771		return &iort_arm_smmu_v3_cfg;
1772	case ACPI_IORT_NODE_SMMU:
1773		return &iort_arm_smmu_cfg;
1774	case ACPI_IORT_NODE_PMCG:
1775		return &iort_arm_smmu_v3_pmcg_cfg;
1776	default:
1777		return NULL;
1778	}
1779}
1780
1781/**
1782 * iort_add_platform_device() - Allocate a platform device for IORT node
1783 * @node: Pointer to device ACPI IORT node
1784 * @ops: Pointer to IORT device config struct
1785 *
1786 * Returns: 0 on success, <0 failure
1787 */
1788static int __init iort_add_platform_device(struct acpi_iort_node *node,
1789					   const struct iort_dev_config *ops)
1790{
1791	struct fwnode_handle *fwnode;
1792	struct platform_device *pdev;
1793	struct resource *r;
1794	int ret, count;
1795
1796	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
1797	if (!pdev)
1798		return -ENOMEM;
1799
1800	if (ops->dev_set_proximity) {
1801		ret = ops->dev_set_proximity(&pdev->dev, node);
1802		if (ret)
1803			goto dev_put;
1804	}
1805
1806	count = ops->dev_count_resources(node);
1807
1808	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
1809	if (!r) {
1810		ret = -ENOMEM;
1811		goto dev_put;
1812	}
1813
1814	ops->dev_init_resources(r, node);
1815
1816	ret = platform_device_add_resources(pdev, r, count);
1817	/*
1818	 * Resources are duplicated in platform_device_add_resources,
1819	 * free their allocated memory
1820	 */
1821	kfree(r);
1822
1823	if (ret)
1824		goto dev_put;
1825
1826	/*
1827	 * Platform devices based on PMCG nodes use platform_data to
1828	 * pass the hardware model info to the driver. For others, add
1829	 * a copy of the IORT node pointer to platform_data to be used to
1830	 * retrieve IORT data information.
1831	 */
1832	if (ops->dev_add_platdata)
1833		ret = ops->dev_add_platdata(pdev);
1834	else
1835		ret = platform_device_add_data(pdev, &node, sizeof(node));
1836
1837	if (ret)
1838		goto dev_put;
1839
1840	fwnode = iort_get_fwnode(node);
1841
1842	if (!fwnode) {
1843		ret = -ENODEV;
1844		goto dev_put;
1845	}
1846
1847	pdev->dev.fwnode = fwnode;
1848
1849	if (ops->dev_dma_configure)
1850		ops->dev_dma_configure(&pdev->dev, node);
1851
1852	iort_set_device_domain(&pdev->dev, node);
1853
1854	ret = platform_device_add(pdev);
1855	if (ret)
1856		goto dma_deconfigure;
1857
1858	return 0;
1859
1860dma_deconfigure:
1861	arch_teardown_dma_ops(&pdev->dev);
1862dev_put:
1863	platform_device_put(pdev);
1864
1865	return ret;
1866}
1867
1868#ifdef CONFIG_PCI
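/*
 * If any PCI root complex in the IORT is translated by an SMMU, request
 * PCI ACS so that devices can be isolated behind the IOMMU. The request
 * is made at most once.
 */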
1869static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
1870{
1871	static bool acs_enabled __initdata;
1872
1873	if (acs_enabled)
1874		return;
1875
1876	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
1877		struct acpi_iort_node *parent;
1878		struct acpi_iort_id_mapping *map;
1879		int i;
1880
1881		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
1882				   iort_node->mapping_offset);
1883
1884		for (i = 0; i < iort_node->mapping_count; i++, map++) {
1885			if (!map->output_reference)
1886				continue;
1887
1888			parent = ACPI_ADD_PTR(struct acpi_iort_node,
1889					iort_table,  map->output_reference);
1890			/*
1891			 * If we detect an RC->SMMU mapping, make sure
1892			 * we enable ACS on the system.
1893			 */
1894			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
1895				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
1896				pci_request_acs();
1897				acs_enabled = true;
1898				return;
1899			}
1900		}
1901	}
1902}
1903#else
1904static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
1905#endif
1906
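/*
 * Walk the IORT once at boot: enable PCI ACS where required and, for every
 * SMMU, SMMUv3 and PMCG node, allocate a static fwnode and register a
 * platform device. Stop on the first failure.
 */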
1907static void __init iort_init_platform_devices(void)
1908{
1909	struct acpi_iort_node *iort_node, *iort_end;
1910	struct acpi_table_iort *iort;
1911	struct fwnode_handle *fwnode;
1912	int i, ret;
1913	const struct iort_dev_config *ops;
1914
1915	/*
1916	 * iort_table and iort both point to the start of the IORT table, but
1917	 * have different struct types
1918	 */
1919	iort = (struct acpi_table_iort *)iort_table;
1920
1921	/* Get the first IORT node */
1922	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1923				 iort->node_offset);
1924	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1925				iort_table->length);
1926
1927	for (i = 0; i < iort->node_count; i++) {
1928		if (iort_node >= iort_end) {
1929			pr_err("iort node pointer overflows, bad table\n");
1930			return;
1931		}
1932
1933		iort_enable_acs(iort_node);
1934
1935		ops = iort_get_dev_cfg(iort_node);
1936		if (ops) {
1937			fwnode = acpi_alloc_fwnode_static();
1938			if (!fwnode)
1939				return;
1940
1941			iort_set_fwnode(iort_node, fwnode);
1942
1943			ret = iort_add_platform_device(iort_node, ops);
1944			if (ret) {
1945				iort_delete_fwnode(iort_node);
1946				acpi_free_fwnode_static(fwnode);
1947				return;
1948			}
1949		}
1950
1951		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
1952					 iort_node->length);
1953	}
1954}
1955
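/*
 * acpi_iort_init() - Map the IORT table and create the IORT platform
 * devices. A missing table is not an error; any other failure to map it
 * is reported and IORT support is left disabled.
 */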
1956void __init acpi_iort_init(void)
1957{
1958	acpi_status status;
1959
1960	/*
1961	 * iort_table will be used at runtime after the iort init, so we don't
1962	 * need to call acpi_put_table() to release the IORT table mapping.
1963	 */
1964	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
1965	if (ACPI_FAILURE(status)) {
1966		if (status != AE_NOT_FOUND) {
1967			const char *msg = acpi_format_exception(status);
1968
1969			pr_err("Failed to get table, %s\n", msg);
1970		}
1971
1972		return;
1973	}
1974
1975	iort_init_platform_devices();
1976}
1977
1978#ifdef CONFIG_ZONE_DMA
1979/*
1980 * Extract the highest CPU physical address accessible to all DMA masters in
1981 * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
1982 */
1983phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
1984{
1985	phys_addr_t limit = PHYS_ADDR_MAX;
1986	struct acpi_iort_node *node, *end;
1987	struct acpi_table_iort *iort;
1988	acpi_status status;
1989	int i;
1990
1991	if (acpi_disabled)
1992		return limit;
1993
1994	status = acpi_get_table(ACPI_SIG_IORT, 0,
1995				(struct acpi_table_header **)&iort);
1996	if (ACPI_FAILURE(status))
1997		return limit;
1998
1999	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
2000	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);
2001
2002	for (i = 0; i < iort->node_count; i++) {
2003		if (node >= end)
2004			break;
2005
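		/*
		 * memory_address_limit is an address size in bits;
		 * DMA_BIT_MASK() converts it to the highest reachable CPU
		 * address and min_not_zero() keeps the smallest non-zero
		 * limit seen so far (a zero limit is ignored).
		 */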
2006		switch (node->type) {
2007			struct acpi_iort_named_component *ncomp;
2008			struct acpi_iort_root_complex *rc;
2009			phys_addr_t local_limit;
2010
2011		case ACPI_IORT_NODE_NAMED_COMPONENT:
2012			ncomp = (struct acpi_iort_named_component *)node->node_data;
2013			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
2014			limit = min_not_zero(limit, local_limit);
2015			break;
2016
2017		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
2018			if (node->revision < 1)
2019				break;
2020
2021			rc = (struct acpi_iort_root_complex *)node->node_data;
2022			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
2023			limit = min_not_zero(limit, local_limit);
2024			break;
2025		}
2026		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
2027	}
2028	acpi_put_table(&iort->header);
2029	return limit;
2030}
2031#endif