   1/*
   2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
   3 * Author: Joerg Roedel <jroedel@suse.de>
   4 *         Leo Duran <leo.duran@amd.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  18 */
  19
  20#include <linux/ratelimit.h>
  21#include <linux/pci.h>
  22#include <linux/pci-ats.h>
  23#include <linux/bitmap.h>
  24#include <linux/slab.h>
  25#include <linux/debugfs.h>
  26#include <linux/scatterlist.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/iommu-helper.h>
  29#include <linux/iommu.h>
  30#include <linux/delay.h>
  31#include <linux/amd-iommu.h>
  32#include <linux/notifier.h>
  33#include <linux/export.h>
  34#include <linux/irq.h>
  35#include <linux/msi.h>
  36#include <linux/dma-contiguous.h>
  37#include <linux/irqdomain.h>
  38#include <linux/percpu.h>
  39#include <asm/irq_remapping.h>
  40#include <asm/io_apic.h>
  41#include <asm/apic.h>
  42#include <asm/hw_irq.h>
  43#include <asm/msidef.h>
  44#include <asm/proto.h>
  45#include <asm/iommu.h>
  46#include <asm/gart.h>
  47#include <asm/dma.h>
  48
  49#include "amd_iommu_proto.h"
  50#include "amd_iommu_types.h"
  51#include "irq_remapping.h"
  52
  53#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
  54
  55#define LOOP_TIMEOUT	100000
  56
  57/*
   58 * This bitmap is used to advertise the page sizes our hardware supports
  59 * to the IOMMU core, which will then use this information to split
  60 * physically contiguous memory regions it is mapping into page sizes
  61 * that we support.
  62 *
  63 * 512GB Pages are not supported due to a hardware bug
  64 */
  65#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
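/*
 * For reference: ~0xFFFUL sets every bit from 12 upwards, i.e. it
 * advertises all power-of-two page sizes from 4KiB on, and ~(2ULL << 38)
 * clears bit 39, removing the broken 512GB (2^39 byte) page size from
 * that set.
 */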
  66
  67static DEFINE_RWLOCK(amd_iommu_devtable_lock);
  68
  69/* List of all available dev_data structures */
  70static LIST_HEAD(dev_data_list);
  71static DEFINE_SPINLOCK(dev_data_list_lock);
  72
  73LIST_HEAD(ioapic_map);
  74LIST_HEAD(hpet_map);
  75
  76/*
  77 * Domain for untranslated devices - only allocated
  78 * if iommu=pt passed on kernel cmd line.
  79 */
  80static const struct iommu_ops amd_iommu_ops;
  81
  82static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
  83int amd_iommu_max_glx_val = -1;
  84
  85static struct dma_map_ops amd_iommu_dma_ops;
  86
  87/*
  88 * This struct contains device specific data for the IOMMU
  89 */
  90struct iommu_dev_data {
  91	struct list_head list;		  /* For domain->dev_list */
  92	struct list_head dev_data_list;	  /* For global dev_data_list */
  93	struct protection_domain *domain; /* Domain the device is bound to */
  94	u16 devid;			  /* PCI Device ID */
  95	u16 alias;			  /* Alias Device ID */
  96	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
  97	bool passthrough;		  /* Device is identity mapped */
  98	struct {
  99		bool enabled;
 100		int qdep;
 101	} ats;				  /* ATS state */
 102	bool pri_tlp;			  /* PASID TLB required for
 103					     PPR completions */
 104	u32 errata;			  /* Bitmap for errata to apply */
 105};
 106
 107/*
  108 * general struct to manage commands sent to an IOMMU
 109 */
 110struct iommu_cmd {
 111	u32 data[4];
 112};
 113
 114struct kmem_cache *amd_iommu_irq_cache;
 115
 116static void update_domain(struct protection_domain *domain);
 117static int protection_domain_init(struct protection_domain *domain);
 118static void detach_device(struct device *dev);
 119
 120/*
 121 * For dynamic growth the aperture size is split into ranges of 128MB of
 122 * DMA address space each. This struct represents one such range.
 123 */
 124struct aperture_range {
 125
 126	spinlock_t bitmap_lock;
 127
 128	/* address allocation bitmap */
 129	unsigned long *bitmap;
 130	unsigned long offset;
 131	unsigned long next_bit;
 132
 133	/*
 134	 * Array of PTE pages for the aperture. In this array we save all the
 135	 * leaf pages of the domain page table used for the aperture. This way
 136	 * we don't need to walk the page table to find a specific PTE. We can
 137	 * just calculate its address in constant time.
 138	 */
 139	u64 *pte_pages[64];
 140};
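/*
 * Note on the sizing above: each leaf page-table page holds 512 PTEs
 * mapping 4KiB each, so the 64 entries in pte_pages[] cover
 * 64 * 512 * 4KiB = 128MiB - exactly one aperture range.
 */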
 141
 142/*
 143 * Data container for a dma_ops specific protection domain
 144 */
 145struct dma_ops_domain {
 146	/* generic protection domain information */
 147	struct protection_domain domain;
 148
 149	/* size of the aperture for the mappings */
 150	unsigned long aperture_size;
 151
 152	/* aperture index we start searching for free addresses */
 153	u32 __percpu *next_index;
 154
 155	/* address space relevant data */
 156	struct aperture_range *aperture[APERTURE_MAX_RANGES];
 157};
 158
 159/****************************************************************************
 160 *
 161 * Helper functions
 162 *
 163 ****************************************************************************/
 164
 165static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 166{
 167	return container_of(dom, struct protection_domain, domain);
 168}
 169
 170static inline u16 get_device_id(struct device *dev)
 171{
 172	struct pci_dev *pdev = to_pci_dev(dev);
 173
 174	return PCI_DEVID(pdev->bus->number, pdev->devfn);
 175}
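/*
 * PCI_DEVID() packs the bus number into bits 15:8 and devfn into bits
 * 7:0, yielding the 16-bit requester ID the IOMMU sees on the bus. This
 * value is what indexes amd_iommu_dev_table and the rlookup/alias tables.
 */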
 176
 177static struct iommu_dev_data *alloc_dev_data(u16 devid)
 178{
 179	struct iommu_dev_data *dev_data;
 180	unsigned long flags;
 181
 182	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
 183	if (!dev_data)
 184		return NULL;
 185
 186	dev_data->devid = devid;
 187
 188	spin_lock_irqsave(&dev_data_list_lock, flags);
 189	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
 190	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 191
 192	return dev_data;
 193}
 194
 195static struct iommu_dev_data *search_dev_data(u16 devid)
 196{
 197	struct iommu_dev_data *dev_data;
 198	unsigned long flags;
 199
 200	spin_lock_irqsave(&dev_data_list_lock, flags);
 201	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
 202		if (dev_data->devid == devid)
 203			goto out_unlock;
 204	}
 205
 206	dev_data = NULL;
 207
 208out_unlock:
 209	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 210
 211	return dev_data;
 212}
 213
 214static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
 215{
 216	*(u16 *)data = alias;
 217	return 0;
 218}
 219
 220static u16 get_alias(struct device *dev)
 221{
 222	struct pci_dev *pdev = to_pci_dev(dev);
 223	u16 devid, ivrs_alias, pci_alias;
 224
 225	devid = get_device_id(dev);
 226	ivrs_alias = amd_iommu_alias_table[devid];
 227	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
 228
 229	if (ivrs_alias == pci_alias)
 230		return ivrs_alias;
 231
 232	/*
 233	 * DMA alias showdown
 234	 *
 235	 * The IVRS is fairly reliable in telling us about aliases, but it
 236	 * can't know about every screwy device.  If we don't have an IVRS
 237	 * reported alias, use the PCI reported alias.  In that case we may
 238	 * still need to initialize the rlookup and dev_table entries if the
 239	 * alias is to a non-existent device.
 240	 */
 241	if (ivrs_alias == devid) {
 242		if (!amd_iommu_rlookup_table[pci_alias]) {
 243			amd_iommu_rlookup_table[pci_alias] =
 244				amd_iommu_rlookup_table[devid];
 245			memcpy(amd_iommu_dev_table[pci_alias].data,
 246			       amd_iommu_dev_table[devid].data,
 247			       sizeof(amd_iommu_dev_table[pci_alias].data));
 248		}
 249
 250		return pci_alias;
 251	}
 252
 253	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
 254		"for device %s[%04x:%04x], kernel reported alias "
 255		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
 256		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
 257		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
 258		PCI_FUNC(pci_alias));
 259
 260	/*
 261	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
 262	 * bus, then the IVRS table may know about a quirk that we don't.
 263	 */
 264	if (pci_alias == devid &&
 265	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
 266		pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
 267		pdev->dma_alias_devfn = ivrs_alias & 0xff;
 268		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
 269			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
 270			dev_name(dev));
 271	}
 272
 273	return ivrs_alias;
 274}
 275
 276static struct iommu_dev_data *find_dev_data(u16 devid)
 277{
 278	struct iommu_dev_data *dev_data;
 279
 280	dev_data = search_dev_data(devid);
 281
 282	if (dev_data == NULL)
 283		dev_data = alloc_dev_data(devid);
 284
 285	return dev_data;
 286}
 287
 288static struct iommu_dev_data *get_dev_data(struct device *dev)
 289{
 290	return dev->archdata.iommu;
 291}
 292
 293static bool pci_iommuv2_capable(struct pci_dev *pdev)
 294{
 295	static const int caps[] = {
 296		PCI_EXT_CAP_ID_ATS,
 297		PCI_EXT_CAP_ID_PRI,
 298		PCI_EXT_CAP_ID_PASID,
 299	};
 300	int i, pos;
 301
 302	for (i = 0; i < 3; ++i) {
 303		pos = pci_find_ext_capability(pdev, caps[i]);
 304		if (pos == 0)
 305			return false;
 306	}
 307
 308	return true;
 309}
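/*
 * A device is treated as IOMMUv2-capable only if it exposes all three
 * PCIe extended capabilities checked here: ATS (Address Translation
 * Services), PRI (Page Request Interface) and PASID (Process Address
 * Space ID) - the combination needed for shared virtual addressing.
 */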
 310
 311static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
 312{
 313	struct iommu_dev_data *dev_data;
 314
 315	dev_data = get_dev_data(&pdev->dev);
 316
 317	return dev_data->errata & (1 << erratum) ? true : false;
 318}
 319
 320/*
  321 * This function reserves the addresses of a unity mapping in the
  322 * allocation bitmap of the dma_ops domain aperture.
 323 */
 324static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
 325				struct unity_map_entry *e)
 326{
 327	u64 addr;
 328
 329	for (addr = e->address_start; addr < e->address_end;
 330	     addr += PAGE_SIZE) {
 331		if (addr < dma_dom->aperture_size)
 332			__set_bit(addr >> PAGE_SHIFT,
 333				  dma_dom->aperture[0]->bitmap);
 334	}
 335}
 336
 337/*
 338 * Inits the unity mappings required for a specific device
 339 */
 340static void init_unity_mappings_for_device(struct device *dev,
 341					   struct dma_ops_domain *dma_dom)
 342{
 343	struct unity_map_entry *e;
 344	u16 devid;
 345
 346	devid = get_device_id(dev);
 347
 348	list_for_each_entry(e, &amd_iommu_unity_map, list) {
 349		if (!(devid >= e->devid_start && devid <= e->devid_end))
 350			continue;
 351		alloc_unity_mapping(dma_dom, e);
 352	}
 353}
 354
 355/*
 356 * This function checks if the driver got a valid device from the caller to
 357 * avoid dereferencing invalid pointers.
 358 */
 359static bool check_device(struct device *dev)
 360{
 361	u16 devid;
 362
 363	if (!dev || !dev->dma_mask)
 364		return false;
 365
 366	/* No PCI device */
 367	if (!dev_is_pci(dev))
 368		return false;
 369
 370	devid = get_device_id(dev);
 371
 372	/* Out of our scope? */
 373	if (devid > amd_iommu_last_bdf)
 374		return false;
 375
 376	if (amd_iommu_rlookup_table[devid] == NULL)
 377		return false;
 378
 379	return true;
 380}
 381
 382static void init_iommu_group(struct device *dev)
 383{
 384	struct dma_ops_domain *dma_domain;
 385	struct iommu_domain *domain;
 386	struct iommu_group *group;
 387
 388	group = iommu_group_get_for_dev(dev);
 389	if (IS_ERR(group))
 390		return;
 391
 392	domain = iommu_group_default_domain(group);
 393	if (!domain)
 394		goto out;
 395
 396	dma_domain = to_pdomain(domain)->priv;
 397
 398	init_unity_mappings_for_device(dev, dma_domain);
 399out:
 400	iommu_group_put(group);
 401}
 402
 403static int iommu_init_device(struct device *dev)
 404{
 405	struct pci_dev *pdev = to_pci_dev(dev);
 406	struct iommu_dev_data *dev_data;
 407
 408	if (dev->archdata.iommu)
 409		return 0;
 410
 411	dev_data = find_dev_data(get_device_id(dev));
 412	if (!dev_data)
 413		return -ENOMEM;
 414
 415	dev_data->alias = get_alias(dev);
 416
 417	if (pci_iommuv2_capable(pdev)) {
 418		struct amd_iommu *iommu;
 419
 420		iommu              = amd_iommu_rlookup_table[dev_data->devid];
 421		dev_data->iommu_v2 = iommu->is_iommu_v2;
 422	}
 423
 424	dev->archdata.iommu = dev_data;
 425
 426	iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
 427			  dev);
 428
 429	return 0;
 430}
 431
 432static void iommu_ignore_device(struct device *dev)
 433{
 434	u16 devid, alias;
 435
 436	devid = get_device_id(dev);
 437	alias = get_alias(dev);
 438
 439	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 440	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
 441
 442	amd_iommu_rlookup_table[devid] = NULL;
 443	amd_iommu_rlookup_table[alias] = NULL;
 444}
 445
 446static void iommu_uninit_device(struct device *dev)
 447{
 448	struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));
 449
 450	if (!dev_data)
 451		return;
 452
 453	if (dev_data->domain)
 454		detach_device(dev);
 455
 456	iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
 457			    dev);
 458
 459	iommu_group_remove_device(dev);
 460
 461	/* Remove dma-ops */
 462	dev->archdata.dma_ops = NULL;
 463
 464	/*
 465	 * We keep dev_data around for unplugged devices and reuse it when the
 466	 * device is re-plugged - not doing so would introduce a ton of races.
 467	 */
 468}
 469
 470#ifdef CONFIG_AMD_IOMMU_STATS
 471
 472/*
 473 * Initialization code for statistics collection
 474 */
 475
 476DECLARE_STATS_COUNTER(compl_wait);
 477DECLARE_STATS_COUNTER(cnt_map_single);
 478DECLARE_STATS_COUNTER(cnt_unmap_single);
 479DECLARE_STATS_COUNTER(cnt_map_sg);
 480DECLARE_STATS_COUNTER(cnt_unmap_sg);
 481DECLARE_STATS_COUNTER(cnt_alloc_coherent);
 482DECLARE_STATS_COUNTER(cnt_free_coherent);
 483DECLARE_STATS_COUNTER(cross_page);
 484DECLARE_STATS_COUNTER(domain_flush_single);
 485DECLARE_STATS_COUNTER(domain_flush_all);
 486DECLARE_STATS_COUNTER(alloced_io_mem);
 487DECLARE_STATS_COUNTER(total_map_requests);
 488DECLARE_STATS_COUNTER(complete_ppr);
 489DECLARE_STATS_COUNTER(invalidate_iotlb);
 490DECLARE_STATS_COUNTER(invalidate_iotlb_all);
 491DECLARE_STATS_COUNTER(pri_requests);
 492
 493static struct dentry *stats_dir;
 494static struct dentry *de_fflush;
 495
 496static void amd_iommu_stats_add(struct __iommu_counter *cnt)
 497{
 498	if (stats_dir == NULL)
 499		return;
 500
 501	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
 502				       &cnt->value);
 503}
 504
 505static void amd_iommu_stats_init(void)
 506{
 507	stats_dir = debugfs_create_dir("amd-iommu", NULL);
 508	if (stats_dir == NULL)
 509		return;
 510
 511	de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
 512					 &amd_iommu_unmap_flush);
 513
 514	amd_iommu_stats_add(&compl_wait);
 515	amd_iommu_stats_add(&cnt_map_single);
 516	amd_iommu_stats_add(&cnt_unmap_single);
 517	amd_iommu_stats_add(&cnt_map_sg);
 518	amd_iommu_stats_add(&cnt_unmap_sg);
 519	amd_iommu_stats_add(&cnt_alloc_coherent);
 520	amd_iommu_stats_add(&cnt_free_coherent);
 521	amd_iommu_stats_add(&cross_page);
 522	amd_iommu_stats_add(&domain_flush_single);
 523	amd_iommu_stats_add(&domain_flush_all);
 524	amd_iommu_stats_add(&alloced_io_mem);
 525	amd_iommu_stats_add(&total_map_requests);
 526	amd_iommu_stats_add(&complete_ppr);
 527	amd_iommu_stats_add(&invalidate_iotlb);
 528	amd_iommu_stats_add(&invalidate_iotlb_all);
 529	amd_iommu_stats_add(&pri_requests);
 530}
 531
 532#endif
 533
 534/****************************************************************************
 535 *
 536 * Interrupt handling functions
 537 *
 538 ****************************************************************************/
 539
 540static void dump_dte_entry(u16 devid)
 541{
 542	int i;
 543
 544	for (i = 0; i < 4; ++i)
 545		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
 546			amd_iommu_dev_table[devid].data[i]);
 547}
 548
 549static void dump_command(unsigned long phys_addr)
 550{
 551	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
 552	int i;
 553
 554	for (i = 0; i < 4; ++i)
 555		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
 556}
 557
 558static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 559{
 560	int type, devid, domid, flags;
 561	volatile u32 *event = __evt;
 562	int count = 0;
 563	u64 address;
 564
 565retry:
 566	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
 567	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 568	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
 569	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 570	address = (u64)(((u64)event[3]) << 32) | event[2];
 571
 572	if (type == 0) {
 573		/* Did we hit the erratum? */
 574		if (++count == LOOP_TIMEOUT) {
 575			pr_err("AMD-Vi: No event written to event log\n");
 576			return;
 577		}
 578		udelay(1);
 579		goto retry;
 580	}
 581
 582	printk(KERN_ERR "AMD-Vi: Event logged [");
 583
 584	switch (type) {
 585	case EVENT_TYPE_ILL_DEV:
 586		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
 587		       "address=0x%016llx flags=0x%04x]\n",
 588		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 589		       address, flags);
 590		dump_dte_entry(devid);
 591		break;
 592	case EVENT_TYPE_IO_FAULT:
 593		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
 594		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 595		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 596		       domid, address, flags);
 597		break;
 598	case EVENT_TYPE_DEV_TAB_ERR:
 599		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 600		       "address=0x%016llx flags=0x%04x]\n",
 601		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 602		       address, flags);
 603		break;
 604	case EVENT_TYPE_PAGE_TAB_ERR:
 605		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 606		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 607		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 608		       domid, address, flags);
 609		break;
 610	case EVENT_TYPE_ILL_CMD:
 611		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
 612		dump_command(address);
 613		break;
 614	case EVENT_TYPE_CMD_HARD_ERR:
 615		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
 616		       "flags=0x%04x]\n", address, flags);
 617		break;
 618	case EVENT_TYPE_IOTLB_INV_TO:
 619		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
 620		       "address=0x%016llx]\n",
 621		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 622		       address);
 623		break;
 624	case EVENT_TYPE_INV_DEV_REQ:
 625		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
 626		       "address=0x%016llx flags=0x%04x]\n",
 627		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 628		       address, flags);
 629		break;
 630	default:
 631		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
 632	}
 633
 634	memset(__evt, 0, 4 * sizeof(u32));
 635}
 636
 637static void iommu_poll_events(struct amd_iommu *iommu)
 638{
 639	u32 head, tail;
 640
 641	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 642	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 643
 644	while (head != tail) {
 645		iommu_print_event(iommu, iommu->evt_buf + head);
 646		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
 647	}
 648
 649	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 650}
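/*
 * head and tail above are byte offsets into the event ring buffer;
 * entries are consumed until head catches up with tail, and writing the
 * new head back to MMIO_EVT_HEAD_OFFSET tells the hardware which slots
 * it may reuse.
 */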
 651
 652static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 653{
 654	struct amd_iommu_fault fault;
 655
 656	INC_STATS_COUNTER(pri_requests);
 657
 658	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 659		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 660		return;
 661	}
 662
 663	fault.address   = raw[1];
 664	fault.pasid     = PPR_PASID(raw[0]);
 665	fault.device_id = PPR_DEVID(raw[0]);
 666	fault.tag       = PPR_TAG(raw[0]);
 667	fault.flags     = PPR_FLAGS(raw[0]);
 668
 669	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 670}
 671
 672static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 673{
 674	u32 head, tail;
 675
 676	if (iommu->ppr_log == NULL)
 677		return;
 678
 679	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 680	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 681
 682	while (head != tail) {
 683		volatile u64 *raw;
 684		u64 entry[2];
 685		int i;
 686
 687		raw = (u64 *)(iommu->ppr_log + head);
 688
 689		/*
 690		 * Hardware bug: Interrupt may arrive before the entry is
 691		 * written to memory. If this happens we need to wait for the
 692		 * entry to arrive.
 693		 */
 694		for (i = 0; i < LOOP_TIMEOUT; ++i) {
 695			if (PPR_REQ_TYPE(raw[0]) != 0)
 696				break;
 697			udelay(1);
 698		}
 699
 700		/* Avoid memcpy function-call overhead */
 701		entry[0] = raw[0];
 702		entry[1] = raw[1];
 703
 704		/*
 705		 * To detect the hardware bug we need to clear the entry
 706		 * back to zero.
 707		 */
 708		raw[0] = raw[1] = 0UL;
 709
 710		/* Update head pointer of hardware ring-buffer */
 711		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 712		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 713
 714		/* Handle PPR entry */
 715		iommu_handle_ppr_entry(iommu, entry);
 716
 717		/* Refresh ring-buffer information */
 718		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 719		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 720	}
 721}
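/*
 * Note the ordering in the loop above: the entry is copied out, cleared
 * back to zero (so the hardware bug described above can be detected on
 * the next iteration), and the head pointer is advanced before the entry
 * is handled - presumably so the hardware regains the slot before the
 * potentially slower notifier chain runs.
 */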
 722
 723irqreturn_t amd_iommu_int_thread(int irq, void *data)
 724{
 725	struct amd_iommu *iommu = (struct amd_iommu *) data;
 726	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 727
 728	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
 729		/* Enable EVT and PPR interrupts again */
 730		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
 731			iommu->mmio_base + MMIO_STATUS_OFFSET);
 732
 733		if (status & MMIO_STATUS_EVT_INT_MASK) {
 734			pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
 735			iommu_poll_events(iommu);
 736		}
 737
 738		if (status & MMIO_STATUS_PPR_INT_MASK) {
 739			pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
 740			iommu_poll_ppr_log(iommu);
 741		}
 742
 743		/*
 744		 * Hardware bug: ERBT1312
 745		 * When re-enabling interrupt (by writing 1
 746		 * to clear the bit), the hardware might also try to set
 747		 * the interrupt bit in the event status register.
 748		 * In this scenario, the bit will be set, and disable
 749		 * subsequent interrupts.
 750		 *
 751		 * Workaround: The IOMMU driver should read back the
 752		 * status register and check if the interrupt bits are cleared.
 753		 * If not, driver will need to go through the interrupt handler
 754		 * again and re-clear the bits
 755		 */
 756		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 757	}
 758	return IRQ_HANDLED;
 759}
 760
 761irqreturn_t amd_iommu_int_handler(int irq, void *data)
 762{
 763	return IRQ_WAKE_THREAD;
 764}
 765
 766/****************************************************************************
 767 *
 768 * IOMMU command queuing functions
 769 *
 770 ****************************************************************************/
 771
 772static int wait_on_sem(volatile u64 *sem)
 773{
 774	int i = 0;
 775
 776	while (*sem == 0 && i < LOOP_TIMEOUT) {
 777		udelay(1);
 778		i += 1;
 779	}
 780
 781	if (i == LOOP_TIMEOUT) {
 782		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
 783		return -EIO;
 784	}
 785
 786	return 0;
 787}
 788
 789static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 790			       struct iommu_cmd *cmd,
 791			       u32 tail)
 792{
 793	u8 *target;
 794
 795	target = iommu->cmd_buf + tail;
 796	tail   = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
 797
 798	/* Copy command to buffer */
 799	memcpy(target, cmd, sizeof(*cmd));
 800
 801	/* Tell the IOMMU about it */
 802	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 803}
 804
 805static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 806{
 807	WARN_ON(address & 0x7ULL);
 808
 809	memset(cmd, 0, sizeof(*cmd));
 810	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
 811	cmd->data[1] = upper_32_bits(__pa(address));
 812	cmd->data[2] = 1;
 813	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 814}
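/*
 * With the STORE flag set, the COMPL_WAIT command makes the IOMMU write
 * the value from data[2]/data[3] (1 here) to the physical address encoded
 * in data[0]/data[1] once all prior commands have completed; wait_on_sem()
 * above simply polls that memory location until it becomes non-zero.
 */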
 815
 816static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 817{
 818	memset(cmd, 0, sizeof(*cmd));
 819	cmd->data[0] = devid;
 820	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 821}
 822
 823static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 824				  size_t size, u16 domid, int pde)
 825{
 826	u64 pages;
 827	bool s;
 828
 829	pages = iommu_num_pages(address, size, PAGE_SIZE);
 830	s     = false;
 831
 832	if (pages > 1) {
 833		/*
 834		 * If we have to flush more than one page, flush all
 835		 * TLB entries for this domain
 836		 */
 837		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 838		s = true;
 839	}
 840
 841	address &= PAGE_MASK;
 842
 843	memset(cmd, 0, sizeof(*cmd));
 844	cmd->data[1] |= domid;
 845	cmd->data[2]  = lower_32_bits(address);
 846	cmd->data[3]  = upper_32_bits(address);
 847	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 848	if (s) /* size bit - we flush more than one 4kb page */
 849		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 850	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
 851		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 852}
 853
 854static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 855				  u64 address, size_t size)
 856{
 857	u64 pages;
 858	bool s;
 859
 860	pages = iommu_num_pages(address, size, PAGE_SIZE);
 861	s     = false;
 862
 863	if (pages > 1) {
 864		/*
 865		 * If we have to flush more than one page, flush all
 866		 * TLB entries for this domain
 867		 */
 868		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 869		s = true;
 870	}
 871
 872	address &= PAGE_MASK;
 873
 874	memset(cmd, 0, sizeof(*cmd));
 875	cmd->data[0]  = devid;
 876	cmd->data[0] |= (qdep & 0xff) << 24;
 877	cmd->data[1]  = devid;
 878	cmd->data[2]  = lower_32_bits(address);
 879	cmd->data[3]  = upper_32_bits(address);
 880	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 881	if (s)
 882		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 883}
 884
 885static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
 886				  u64 address, bool size)
 887{
 888	memset(cmd, 0, sizeof(*cmd));
 889
 890	address &= ~(0xfffULL);
 891
 892	cmd->data[0]  = pasid;
 893	cmd->data[1]  = domid;
 894	cmd->data[2]  = lower_32_bits(address);
 895	cmd->data[3]  = upper_32_bits(address);
 896	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 897	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 898	if (size)
 899		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 900	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 901}
 902
 903static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
 904				  int qdep, u64 address, bool size)
 905{
 906	memset(cmd, 0, sizeof(*cmd));
 907
 908	address &= ~(0xfffULL);
 909
 910	cmd->data[0]  = devid;
 911	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
 912	cmd->data[0] |= (qdep  & 0xff) << 24;
 913	cmd->data[1]  = devid;
 914	cmd->data[1] |= (pasid & 0xff) << 16;
 915	cmd->data[2]  = lower_32_bits(address);
 916	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 917	cmd->data[3]  = upper_32_bits(address);
 918	if (size)
 919		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 920	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 921}
 922
 923static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
 924			       int status, int tag, bool gn)
 925{
 926	memset(cmd, 0, sizeof(*cmd));
 927
 928	cmd->data[0]  = devid;
 929	if (gn) {
 930		cmd->data[1]  = pasid;
 931		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
 932	}
 933	cmd->data[3]  = tag & 0x1ff;
 934	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
 935
 936	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
 937}
 938
 939static void build_inv_all(struct iommu_cmd *cmd)
 940{
 941	memset(cmd, 0, sizeof(*cmd));
 942	CMD_SET_TYPE(cmd, CMD_INV_ALL);
 943}
 944
 945static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
 946{
 947	memset(cmd, 0, sizeof(*cmd));
 948	cmd->data[0] = devid;
 949	CMD_SET_TYPE(cmd, CMD_INV_IRT);
 950}
 951
 952/*
 953 * Writes the command to the IOMMUs command buffer and informs the
 954 * hardware about the new command.
 955 */
 956static int iommu_queue_command_sync(struct amd_iommu *iommu,
 957				    struct iommu_cmd *cmd,
 958				    bool sync)
 959{
 960	u32 left, tail, head, next_tail;
 961	unsigned long flags;
 962
 963again:
 964	spin_lock_irqsave(&iommu->lock, flags);
 965
 966	head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 967	tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 968	next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
 969	left      = (head - next_tail) % CMD_BUFFER_SIZE;
 970
 971	if (left <= 2) {
 972		struct iommu_cmd sync_cmd;
 973		volatile u64 sem = 0;
 974		int ret;
 975
 976		build_completion_wait(&sync_cmd, (u64)&sem);
 977		copy_cmd_to_buffer(iommu, &sync_cmd, tail);
 978
 979		spin_unlock_irqrestore(&iommu->lock, flags);
 980
 981		if ((ret = wait_on_sem(&sem)) != 0)
 982			return ret;
 983
 984		goto again;
 985	}
 986
 987	copy_cmd_to_buffer(iommu, cmd, tail);
 988
 989	/* We need to sync now to make sure all commands are processed */
 990	iommu->need_sync = sync;
 991
 992	spin_unlock_irqrestore(&iommu->lock, flags);
 993
 994	return 0;
 995}
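/*
 * head and tail are byte offsets into the command ring, so "left" is the
 * remaining free space in bytes. When the ring is almost full the code
 * queues a completion-wait on an on-stack semaphore, drops the lock,
 * waits for the hardware to drain the queue and then retries from the top.
 */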
 996
 997static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 998{
 999	return iommu_queue_command_sync(iommu, cmd, true);
1000}
1001
1002/*
1003 * This function queues a completion wait command into the command
1004 * buffer of an IOMMU
1005 */
1006static int iommu_completion_wait(struct amd_iommu *iommu)
1007{
1008	struct iommu_cmd cmd;
1009	volatile u64 sem = 0;
1010	int ret;
1011
1012	if (!iommu->need_sync)
1013		return 0;
1014
1015	build_completion_wait(&cmd, (u64)&sem);
1016
1017	ret = iommu_queue_command_sync(iommu, &cmd, false);
1018	if (ret)
1019		return ret;
1020
1021	return wait_on_sem(&sem);
1022}
1023
1024static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1025{
1026	struct iommu_cmd cmd;
1027
1028	build_inv_dte(&cmd, devid);
1029
1030	return iommu_queue_command(iommu, &cmd);
1031}
1032
1033static void iommu_flush_dte_all(struct amd_iommu *iommu)
1034{
1035	u32 devid;
1036
1037	for (devid = 0; devid <= 0xffff; ++devid)
1038		iommu_flush_dte(iommu, devid);
1039
1040	iommu_completion_wait(iommu);
1041}
1042
1043/*
1044 * This function uses heavy locking and may disable irqs for some time. But
1045 * this is no issue because it is only called during resume.
1046 */
1047static void iommu_flush_tlb_all(struct amd_iommu *iommu)
1048{
1049	u32 dom_id;
1050
1051	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
1052		struct iommu_cmd cmd;
1053		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1054				      dom_id, 1);
1055		iommu_queue_command(iommu, &cmd);
1056	}
1057
1058	iommu_completion_wait(iommu);
1059}
1060
1061static void iommu_flush_all(struct amd_iommu *iommu)
1062{
1063	struct iommu_cmd cmd;
1064
1065	build_inv_all(&cmd);
1066
1067	iommu_queue_command(iommu, &cmd);
1068	iommu_completion_wait(iommu);
1069}
1070
1071static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1072{
1073	struct iommu_cmd cmd;
1074
1075	build_inv_irt(&cmd, devid);
1076
1077	iommu_queue_command(iommu, &cmd);
1078}
1079
1080static void iommu_flush_irt_all(struct amd_iommu *iommu)
1081{
1082	u32 devid;
1083
1084	for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
1085		iommu_flush_irt(iommu, devid);
1086
1087	iommu_completion_wait(iommu);
1088}
1089
1090void iommu_flush_all_caches(struct amd_iommu *iommu)
1091{
1092	if (iommu_feature(iommu, FEATURE_IA)) {
1093		iommu_flush_all(iommu);
1094	} else {
1095		iommu_flush_dte_all(iommu);
1096		iommu_flush_irt_all(iommu);
1097		iommu_flush_tlb_all(iommu);
1098	}
1099}
1100
1101/*
1102 * Command send function for flushing on-device TLB
1103 */
1104static int device_flush_iotlb(struct iommu_dev_data *dev_data,
1105			      u64 address, size_t size)
1106{
1107	struct amd_iommu *iommu;
1108	struct iommu_cmd cmd;
1109	int qdep;
1110
1111	qdep     = dev_data->ats.qdep;
1112	iommu    = amd_iommu_rlookup_table[dev_data->devid];
1113
1114	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1115
1116	return iommu_queue_command(iommu, &cmd);
1117}
1118
1119/*
1120 * Command send function for invalidating a device table entry
1121 */
1122static int device_flush_dte(struct iommu_dev_data *dev_data)
1123{
1124	struct amd_iommu *iommu;
1125	u16 alias;
1126	int ret;
1127
1128	iommu = amd_iommu_rlookup_table[dev_data->devid];
1129	alias = dev_data->alias;
1130
1131	ret = iommu_flush_dte(iommu, dev_data->devid);
1132	if (!ret && alias != dev_data->devid)
1133		ret = iommu_flush_dte(iommu, alias);
1134	if (ret)
1135		return ret;
1136
1137	if (dev_data->ats.enabled)
1138		ret = device_flush_iotlb(dev_data, 0, ~0UL);
1139
1140	return ret;
1141}
1142
1143/*
1144 * TLB invalidation function which is called from the mapping functions.
1145 * It invalidates a single PTE if the range to flush is within a single
1146 * page. Otherwise it flushes the whole TLB of the IOMMU.
1147 */
1148static void __domain_flush_pages(struct protection_domain *domain,
1149				 u64 address, size_t size, int pde)
1150{
1151	struct iommu_dev_data *dev_data;
1152	struct iommu_cmd cmd;
1153	int ret = 0, i;
1154
1155	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1156
1157	for (i = 0; i < amd_iommus_present; ++i) {
1158		if (!domain->dev_iommu[i])
1159			continue;
1160
1161		/*
1162		 * Devices of this domain are behind this IOMMU
1163		 * We need a TLB flush
1164		 */
1165		ret |= iommu_queue_command(amd_iommus[i], &cmd);
1166	}
1167
1168	list_for_each_entry(dev_data, &domain->dev_list, list) {
1169
1170		if (!dev_data->ats.enabled)
1171			continue;
1172
1173		ret |= device_flush_iotlb(dev_data, address, size);
1174	}
1175
1176	WARN_ON(ret);
1177}
1178
1179static void domain_flush_pages(struct protection_domain *domain,
1180			       u64 address, size_t size)
1181{
1182	__domain_flush_pages(domain, address, size, 0);
1183}
1184
1185/* Flush the whole IO/TLB for a given protection domain */
1186static void domain_flush_tlb(struct protection_domain *domain)
1187{
1188	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
1189}
1190
1191/* Flush the whole IO/TLB for a given protection domain - including PDE */
1192static void domain_flush_tlb_pde(struct protection_domain *domain)
1193{
1194	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1195}
1196
1197static void domain_flush_complete(struct protection_domain *domain)
1198{
1199	int i;
1200
1201	for (i = 0; i < amd_iommus_present; ++i) {
1202		if (!domain->dev_iommu[i])
1203			continue;
1204
1205		/*
1206		 * Devices of this domain are behind this IOMMU
1207		 * We need to wait for completion of all commands.
1208		 */
1209		iommu_completion_wait(amd_iommus[i]);
1210	}
1211}
1212
1213
1214/*
1215 * This function flushes the DTEs for all devices in domain
1216 */
1217static void domain_flush_devices(struct protection_domain *domain)
1218{
1219	struct iommu_dev_data *dev_data;
1220
1221	list_for_each_entry(dev_data, &domain->dev_list, list)
1222		device_flush_dte(dev_data);
1223}
1224
1225/****************************************************************************
1226 *
 1227 * The functions below are used to create the page table mappings for
1228 * unity mapped regions.
1229 *
1230 ****************************************************************************/
1231
1232/*
1233 * This function is used to add another level to an IO page table. Adding
1234 * another level increases the size of the address space by 9 bits to a size up
1235 * to 64 bits.
1236 */
1237static bool increase_address_space(struct protection_domain *domain,
1238				   gfp_t gfp)
1239{
1240	u64 *pte;
1241
1242	if (domain->mode == PAGE_MODE_6_LEVEL)
1243		/* address space already 64 bit large */
1244		return false;
1245
1246	pte = (void *)get_zeroed_page(gfp);
1247	if (!pte)
1248		return false;
1249
1250	*pte             = PM_LEVEL_PDE(domain->mode,
1251					virt_to_phys(domain->pt_root));
1252	domain->pt_root  = pte;
1253	domain->mode    += 1;
1254	domain->updated  = true;
1255
1256	return true;
1257}
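/*
 * Each translation level resolves 9 bits of IOVA on top of the 12-bit
 * page offset, so mode n covers 12 + 9 * n address bits. At
 * PAGE_MODE_6_LEVEL that is already more than 64 bits, which is why the
 * address space cannot be grown any further.
 */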
1258
1259static u64 *alloc_pte(struct protection_domain *domain,
1260		      unsigned long address,
1261		      unsigned long page_size,
1262		      u64 **pte_page,
1263		      gfp_t gfp)
1264{
1265	int level, end_lvl;
1266	u64 *pte, *page;
1267
1268	BUG_ON(!is_power_of_2(page_size));
1269
1270	while (address > PM_LEVEL_SIZE(domain->mode))
1271		increase_address_space(domain, gfp);
1272
1273	level   = domain->mode - 1;
1274	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1275	address = PAGE_SIZE_ALIGN(address, page_size);
1276	end_lvl = PAGE_SIZE_LEVEL(page_size);
1277
1278	while (level > end_lvl) {
1279		u64 __pte, __npte;
1280
1281		__pte = *pte;
1282
1283		if (!IOMMU_PTE_PRESENT(__pte)) {
1284			page = (u64 *)get_zeroed_page(gfp);
1285			if (!page)
1286				return NULL;
1287
1288			__npte = PM_LEVEL_PDE(level, virt_to_phys(page));
1289
1290			if (cmpxchg64(pte, __pte, __npte)) {
1291				free_page((unsigned long)page);
1292				continue;
1293			}
1294		}
1295
1296		/* No level skipping support yet */
1297		if (PM_PTE_LEVEL(*pte) != level)
1298			return NULL;
1299
1300		level -= 1;
1301
1302		pte = IOMMU_PTE_PAGE(*pte);
1303
1304		if (pte_page && level == end_lvl)
1305			*pte_page = pte;
1306
1307		pte = &pte[PM_LEVEL_INDEX(level, address)];
1308	}
1309
1310	return pte;
1311}
1312
1313/*
1314 * This function checks if there is a PTE for a given dma address. If
1315 * there is one, it returns the pointer to it.
1316 */
1317static u64 *fetch_pte(struct protection_domain *domain,
1318		      unsigned long address,
1319		      unsigned long *page_size)
1320{
1321	int level;
1322	u64 *pte;
1323
1324	if (address > PM_LEVEL_SIZE(domain->mode))
1325		return NULL;
1326
1327	level	   =  domain->mode - 1;
1328	pte	   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1329	*page_size =  PTE_LEVEL_PAGE_SIZE(level);
1330
1331	while (level > 0) {
1332
1333		/* Not Present */
1334		if (!IOMMU_PTE_PRESENT(*pte))
1335			return NULL;
1336
1337		/* Large PTE */
1338		if (PM_PTE_LEVEL(*pte) == 7 ||
1339		    PM_PTE_LEVEL(*pte) == 0)
1340			break;
1341
1342		/* No level skipping support yet */
1343		if (PM_PTE_LEVEL(*pte) != level)
1344			return NULL;
1345
1346		level -= 1;
1347
1348		/* Walk to the next level */
1349		pte	   = IOMMU_PTE_PAGE(*pte);
1350		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
1351		*page_size = PTE_LEVEL_PAGE_SIZE(level);
1352	}
1353
1354	if (PM_PTE_LEVEL(*pte) == 0x07) {
1355		unsigned long pte_mask;
1356
1357		/*
1358		 * If we have a series of large PTEs, make
1359		 * sure to return a pointer to the first one.
1360		 */
1361		*page_size = pte_mask = PTE_PAGE_SIZE(*pte);
1362		pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
1363		pte        = (u64 *)(((unsigned long)pte) & pte_mask);
1364	}
1365
1366	return pte;
1367}
1368
1369/*
1370 * Generic mapping functions. It maps a physical address into a DMA
1371 * address space. It allocates the page table pages if necessary.
1372 * In the future it can be extended to a generic mapping function
1373 * supporting all features of AMD IOMMU page tables like level skipping
1374 * and full 64 bit address spaces.
1375 */
1376static int iommu_map_page(struct protection_domain *dom,
1377			  unsigned long bus_addr,
1378			  unsigned long phys_addr,
1379			  int prot,
1380			  unsigned long page_size)
1381{
1382	u64 __pte, *pte;
1383	int i, count;
1384
1385	BUG_ON(!IS_ALIGNED(bus_addr, page_size));
1386	BUG_ON(!IS_ALIGNED(phys_addr, page_size));
1387
1388	if (!(prot & IOMMU_PROT_MASK))
1389		return -EINVAL;
1390
1391	count = PAGE_SIZE_PTE_COUNT(page_size);
1392	pte   = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
1393
1394	if (!pte)
1395		return -ENOMEM;
1396
1397	for (i = 0; i < count; ++i)
1398		if (IOMMU_PTE_PRESENT(pte[i]))
1399			return -EBUSY;
1400
1401	if (count > 1) {
1402		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
1403		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
1404	} else
1405		__pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
1406
1407	if (prot & IOMMU_PROT_IR)
1408		__pte |= IOMMU_PTE_IR;
1409	if (prot & IOMMU_PROT_IW)
1410		__pte |= IOMMU_PTE_IW;
1411
1412	for (i = 0; i < count; ++i)
1413		pte[i] = __pte;
1414
1415	update_domain(dom);
1416
1417	return 0;
1418}
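/*
 * For page sizes larger than 4KiB the PTE gets next-level code 7
 * (PM_LEVEL_ENC(7)) and the same value is replicated into all "count"
 * slots the large page spans at this level; fetch_pte() above recognises
 * this and hands back a pointer to the first PTE of such a series.
 */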
1419
1420static unsigned long iommu_unmap_page(struct protection_domain *dom,
1421				      unsigned long bus_addr,
1422				      unsigned long page_size)
1423{
1424	unsigned long long unmapped;
1425	unsigned long unmap_size;
1426	u64 *pte;
1427
1428	BUG_ON(!is_power_of_2(page_size));
1429
1430	unmapped = 0;
1431
1432	while (unmapped < page_size) {
1433
1434		pte = fetch_pte(dom, bus_addr, &unmap_size);
1435
1436		if (pte) {
1437			int i, count;
1438
1439			count = PAGE_SIZE_PTE_COUNT(unmap_size);
1440			for (i = 0; i < count; i++)
1441				pte[i] = 0ULL;
1442		}
1443
1444		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
1445		unmapped += unmap_size;
1446	}
1447
1448	BUG_ON(unmapped && !is_power_of_2(unmapped));
1449
1450	return unmapped;
1451}
1452
1453/****************************************************************************
1454 *
1455 * The next functions belong to the address allocator for the dma_ops
1456 * interface functions. They work like the allocators in the other IOMMU
 1457 * drivers. It's basically a bitmap which marks the allocated pages in
1458 * the aperture. Maybe it could be enhanced in the future to a more
1459 * efficient allocator.
1460 *
1461 ****************************************************************************/
1462
1463/*
1464 * The address allocator core functions.
1465 *
1466 * called with domain->lock held
1467 */
1468
1469/*
1470 * Used to reserve address ranges in the aperture (e.g. for exclusion
 1471 * ranges).
1472 */
1473static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
1474				      unsigned long start_page,
1475				      unsigned int pages)
1476{
1477	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
1478
1479	if (start_page + pages > last_page)
1480		pages = last_page - start_page;
1481
1482	for (i = start_page; i < start_page + pages; ++i) {
1483		int index = i / APERTURE_RANGE_PAGES;
1484		int page  = i % APERTURE_RANGE_PAGES;
1485		__set_bit(page, dom->aperture[index]->bitmap);
1486	}
1487}
1488
1489/*
1490 * This function is used to add a new aperture range to an existing
1491 * aperture in case of dma_ops domain allocation or address allocation
1492 * failure.
1493 */
1494static int alloc_new_range(struct dma_ops_domain *dma_dom,
1495			   bool populate, gfp_t gfp)
1496{
1497	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
1498	unsigned long i, old_size, pte_pgsize;
1499	struct aperture_range *range;
1500	struct amd_iommu *iommu;
1501	unsigned long flags;
1502
1503#ifdef CONFIG_IOMMU_STRESS
1504	populate = false;
1505#endif
1506
1507	if (index >= APERTURE_MAX_RANGES)
1508		return -ENOMEM;
1509
1510	range = kzalloc(sizeof(struct aperture_range), gfp);
1511	if (!range)
1512		return -ENOMEM;
1513
1514	range->bitmap = (void *)get_zeroed_page(gfp);
1515	if (!range->bitmap)
1516		goto out_free;
1517
1518	range->offset = dma_dom->aperture_size;
1519
1520	spin_lock_init(&range->bitmap_lock);
1521
1522	if (populate) {
1523		unsigned long address = dma_dom->aperture_size;
1524		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
1525		u64 *pte, *pte_page;
1526
1527		for (i = 0; i < num_ptes; ++i) {
1528			pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
1529					&pte_page, gfp);
1530			if (!pte)
1531				goto out_free;
1532
1533			range->pte_pages[i] = pte_page;
1534
1535			address += APERTURE_RANGE_SIZE / 64;
1536		}
1537	}
1538
1539	spin_lock_irqsave(&dma_dom->domain.lock, flags);
1540
1541	/* First take the bitmap_lock and then publish the range */
1542	spin_lock(&range->bitmap_lock);
1543
1544	old_size                 = dma_dom->aperture_size;
1545	dma_dom->aperture[index] = range;
1546	dma_dom->aperture_size  += APERTURE_RANGE_SIZE;
1547
1548	/* Reserve address range used for MSI messages */
1549	if (old_size < MSI_ADDR_BASE_LO &&
1550	    dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
1551		unsigned long spage;
1552		int pages;
1553
1554		pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
1555		spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
1556
1557		dma_ops_reserve_addresses(dma_dom, spage, pages);
1558	}
1559
1560	/* Initialize the exclusion range if necessary */
1561	for_each_iommu(iommu) {
1562		if (iommu->exclusion_start &&
1563		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
1564		    && iommu->exclusion_start < dma_dom->aperture_size) {
1565			unsigned long startpage;
1566			int pages = iommu_num_pages(iommu->exclusion_start,
1567						    iommu->exclusion_length,
1568						    PAGE_SIZE);
1569			startpage = iommu->exclusion_start >> PAGE_SHIFT;
1570			dma_ops_reserve_addresses(dma_dom, startpage, pages);
1571		}
1572	}
1573
1574	/*
1575	 * Check for areas already mapped as present in the new aperture
1576	 * range and mark those pages as reserved in the allocator. Such
1577	 * mappings may already exist as a result of requested unity
1578	 * mappings for devices.
1579	 */
1580	for (i = dma_dom->aperture[index]->offset;
1581	     i < dma_dom->aperture_size;
1582	     i += pte_pgsize) {
1583		u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
1584		if (!pte || !IOMMU_PTE_PRESENT(*pte))
1585			continue;
1586
1587		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
1588					  pte_pgsize >> 12);
1589	}
1590
1591	update_domain(&dma_dom->domain);
1592
1593	spin_unlock(&range->bitmap_lock);
1594
1595	spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
1596
1597	return 0;
1598
1599out_free:
1600	update_domain(&dma_dom->domain);
1601
1602	free_page((unsigned long)range->bitmap);
1603
1604	kfree(range);
1605
1606	return -ENOMEM;
1607}
1608
1609static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom,
1610					 struct aperture_range *range,
1611					 unsigned long pages,
1612					 unsigned long dma_mask,
1613					 unsigned long boundary_size,
1614					 unsigned long align_mask,
1615					 bool trylock)
1616{
1617	unsigned long offset, limit, flags;
1618	dma_addr_t address;
1619	bool flush = false;
1620
1621	offset = range->offset >> PAGE_SHIFT;
1622	limit  = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
1623					dma_mask >> PAGE_SHIFT);
1624
1625	if (trylock) {
1626		if (!spin_trylock_irqsave(&range->bitmap_lock, flags))
1627			return -1;
1628	} else {
1629		spin_lock_irqsave(&range->bitmap_lock, flags);
1630	}
1631
1632	address = iommu_area_alloc(range->bitmap, limit, range->next_bit,
1633				   pages, offset, boundary_size, align_mask);
1634	if (address == -1) {
1635		/* Nothing found, retry one time */
1636		address = iommu_area_alloc(range->bitmap, limit,
1637					   0, pages, offset, boundary_size,
1638					   align_mask);
1639		flush = true;
1640	}
1641
1642	if (address != -1)
1643		range->next_bit = address + pages;
1644
1645	spin_unlock_irqrestore(&range->bitmap_lock, flags);
1646
1647	if (flush) {
1648		domain_flush_tlb(&dom->domain);
1649		domain_flush_complete(&dom->domain);
1650	}
1651
1652	return address;
1653}
1654
1655static unsigned long dma_ops_area_alloc(struct device *dev,
1656					struct dma_ops_domain *dom,
1657					unsigned int pages,
1658					unsigned long align_mask,
1659					u64 dma_mask)
1660{
1661	unsigned long boundary_size, mask;
1662	unsigned long address = -1;
1663	bool first = true;
1664	u32 start, i;
1665
1666	preempt_disable();
1667
1668	mask = dma_get_seg_boundary(dev);
1669
1670again:
1671	start = this_cpu_read(*dom->next_index);
1672
1673	/* Sanity check - is it really necessary? */
1674	if (unlikely(start > APERTURE_MAX_RANGES)) {
1675		start = 0;
1676		this_cpu_write(*dom->next_index, 0);
1677	}
1678
1679	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
1680				   1UL << (BITS_PER_LONG - PAGE_SHIFT);
1681
1682	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1683		struct aperture_range *range;
1684		int index;
1685
1686		index = (start + i) % APERTURE_MAX_RANGES;
1687
1688		range = dom->aperture[index];
1689
1690		if (!range || range->offset >= dma_mask)
1691			continue;
1692
1693		address = dma_ops_aperture_alloc(dom, range, pages,
1694						 dma_mask, boundary_size,
1695						 align_mask, first);
1696		if (address != -1) {
1697			address = range->offset + (address << PAGE_SHIFT);
1698			this_cpu_write(*dom->next_index, index);
1699			break;
1700		}
1701	}
1702
1703	if (address == -1 && first) {
1704		first = false;
1705		goto again;
1706	}
1707
1708	preempt_enable();
1709
1710	return address;
1711}
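/*
 * Allocation starts at this CPU's cached aperture index and walks all
 * ranges from there. On the first pass the per-range bitmap lock is only
 * trylock'ed (the "first" flag is passed as "trylock") to avoid
 * contention between CPUs; only if that pass finds nothing does the
 * second pass block on the locks.
 */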
1712
1713static unsigned long dma_ops_alloc_addresses(struct device *dev,
1714					     struct dma_ops_domain *dom,
1715					     unsigned int pages,
1716					     unsigned long align_mask,
1717					     u64 dma_mask)
1718{
1719	unsigned long address = -1;
1720
1721	while (address == -1) {
1722		address = dma_ops_area_alloc(dev, dom, pages,
1723					     align_mask, dma_mask);
1724
1725		if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC))
1726			break;
1727	}
1728
1729	if (unlikely(address == -1))
1730		address = DMA_ERROR_CODE;
1731
1732	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
1733
1734	return address;
1735}
1736
1737/*
1738 * The address free function.
1739 *
1740 * called with domain->lock held
1741 */
1742static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1743				   unsigned long address,
1744				   unsigned int pages)
1745{
1746	unsigned i = address >> APERTURE_RANGE_SHIFT;
1747	struct aperture_range *range = dom->aperture[i];
1748	unsigned long flags;
1749
1750	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
1751
1752#ifdef CONFIG_IOMMU_STRESS
1753	if (i < 4)
1754		return;
1755#endif
1756
1757	if (amd_iommu_unmap_flush) {
1758		domain_flush_tlb(&dom->domain);
1759		domain_flush_complete(&dom->domain);
1760	}
1761
1762	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
1763
1764	spin_lock_irqsave(&range->bitmap_lock, flags);
1765	if (address + pages > range->next_bit)
1766		range->next_bit = address + pages;
1767	bitmap_clear(range->bitmap, address, pages);
1768	spin_unlock_irqrestore(&range->bitmap_lock, flags);
1769
1770}
1771
1772/****************************************************************************
1773 *
1774 * The next functions belong to the domain allocation. A domain is
1775 * allocated for every IOMMU as the default domain. If device isolation
 1776 * is enabled, every device gets its own domain. The most important thing
1777 * about domains is the page table mapping the DMA address space they
1778 * contain.
1779 *
1780 ****************************************************************************/
1781
1782/*
1783 * This function adds a protection domain to the global protection domain list
1784 */
1785static void add_domain_to_list(struct protection_domain *domain)
1786{
1787	unsigned long flags;
1788
1789	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1790	list_add(&domain->list, &amd_iommu_pd_list);
1791	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1792}
1793
1794/*
 1795 * This function removes a protection domain from the global
1796 * protection domain list
1797 */
1798static void del_domain_from_list(struct protection_domain *domain)
1799{
1800	unsigned long flags;
1801
1802	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1803	list_del(&domain->list);
1804	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1805}
1806
1807static u16 domain_id_alloc(void)
1808{
1809	unsigned long flags;
1810	int id;
1811
1812	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1813	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1814	BUG_ON(id == 0);
1815	if (id > 0 && id < MAX_DOMAIN_ID)
1816		__set_bit(id, amd_iommu_pd_alloc_bitmap);
1817	else
1818		id = 0;
1819	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1820
1821	return id;
1822}
1823
1824static void domain_id_free(int id)
1825{
1826	unsigned long flags;
1827
1828	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1829	if (id > 0 && id < MAX_DOMAIN_ID)
1830		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
1831	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1832}
1833
1834#define DEFINE_FREE_PT_FN(LVL, FN)				\
1835static void free_pt_##LVL (unsigned long __pt)			\
1836{								\
1837	unsigned long p;					\
1838	u64 *pt;						\
1839	int i;							\
1840								\
1841	pt = (u64 *)__pt;					\
1842								\
1843	for (i = 0; i < 512; ++i) {				\
1844		/* PTE present? */				\
1845		if (!IOMMU_PTE_PRESENT(pt[i]))			\
1846			continue;				\
1847								\
1848		/* Large PTE? */				\
1849		if (PM_PTE_LEVEL(pt[i]) == 0 ||			\
1850		    PM_PTE_LEVEL(pt[i]) == 7)			\
1851			continue;				\
1852								\
1853		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
1854		FN(p);						\
1855	}							\
1856	free_page((unsigned long)pt);				\
1857}
1858
1859DEFINE_FREE_PT_FN(l2, free_page)
1860DEFINE_FREE_PT_FN(l3, free_pt_l2)
1861DEFINE_FREE_PT_FN(l4, free_pt_l3)
1862DEFINE_FREE_PT_FN(l5, free_pt_l4)
1863DEFINE_FREE_PT_FN(l6, free_pt_l5)
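/*
 * The macro above generates free_pt_l2 .. free_pt_l6, where each variant
 * walks one page-table page and invokes the next lower variant on every
 * present entry, finally freeing the page itself. Entries with level code
 * 0 or 7 are skipped because they map (large) pages directly rather than
 * pointing to a lower-level table.
 */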
1864
1865static void free_pagetable(struct protection_domain *domain)
1866{
1867	unsigned long root = (unsigned long)domain->pt_root;
1868
1869	switch (domain->mode) {
1870	case PAGE_MODE_NONE:
1871		break;
1872	case PAGE_MODE_1_LEVEL:
1873		free_page(root);
1874		break;
1875	case PAGE_MODE_2_LEVEL:
1876		free_pt_l2(root);
1877		break;
1878	case PAGE_MODE_3_LEVEL:
1879		free_pt_l3(root);
1880		break;
1881	case PAGE_MODE_4_LEVEL:
1882		free_pt_l4(root);
1883		break;
1884	case PAGE_MODE_5_LEVEL:
1885		free_pt_l5(root);
1886		break;
1887	case PAGE_MODE_6_LEVEL:
1888		free_pt_l6(root);
1889		break;
1890	default:
1891		BUG();
1892	}
1893}
1894
1895static void free_gcr3_tbl_level1(u64 *tbl)
1896{
1897	u64 *ptr;
1898	int i;
1899
1900	for (i = 0; i < 512; ++i) {
1901		if (!(tbl[i] & GCR3_VALID))
1902			continue;
1903
1904		ptr = __va(tbl[i] & PAGE_MASK);
1905
1906		free_page((unsigned long)ptr);
1907	}
1908}
1909
1910static void free_gcr3_tbl_level2(u64 *tbl)
1911{
1912	u64 *ptr;
1913	int i;
1914
1915	for (i = 0; i < 512; ++i) {
1916		if (!(tbl[i] & GCR3_VALID))
1917			continue;
1918
1919		ptr = __va(tbl[i] & PAGE_MASK);
1920
1921		free_gcr3_tbl_level1(ptr);
1922	}
1923}
1924
1925static void free_gcr3_table(struct protection_domain *domain)
1926{
1927	if (domain->glx == 2)
1928		free_gcr3_tbl_level2(domain->gcr3_tbl);
1929	else if (domain->glx == 1)
1930		free_gcr3_tbl_level1(domain->gcr3_tbl);
1931	else
1932		BUG_ON(domain->glx != 0);
1933
1934	free_page((unsigned long)domain->gcr3_tbl);
1935}
1936
1937/*
1938 * Free a domain, only used if something went wrong in the
1939 * allocation path and we need to free an already allocated page table
1940 */
1941static void dma_ops_domain_free(struct dma_ops_domain *dom)
1942{
1943	int i;
1944
1945	if (!dom)
1946		return;
1947
1948	free_percpu(dom->next_index);
1949
1950	del_domain_from_list(&dom->domain);
1951
1952	free_pagetable(&dom->domain);
1953
1954	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1955		if (!dom->aperture[i])
1956			continue;
1957		free_page((unsigned long)dom->aperture[i]->bitmap);
1958		kfree(dom->aperture[i]);
1959	}
1960
1961	kfree(dom);
1962}
1963
1964static int dma_ops_domain_alloc_apertures(struct dma_ops_domain *dma_dom,
1965					  int max_apertures)
1966{
1967	int ret, i, apertures;
1968
1969	apertures = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
1970	ret       = 0;
1971
1972	for (i = apertures; i < max_apertures; ++i) {
1973		ret = alloc_new_range(dma_dom, false, GFP_KERNEL);
1974		if (ret)
1975			break;
1976	}
1977
1978	return ret;
1979}
1980
1981/*
1982 * Allocates a new protection domain usable for the dma_ops functions.
1983 * It also initializes the page table and the address allocator data
1984 * structures required for the dma_ops interface
1985 */
1986static struct dma_ops_domain *dma_ops_domain_alloc(void)
1987{
1988	struct dma_ops_domain *dma_dom;
1989	int cpu;
1990
1991	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
1992	if (!dma_dom)
1993		return NULL;
1994
1995	if (protection_domain_init(&dma_dom->domain))
1996		goto free_dma_dom;
1997
1998	dma_dom->next_index = alloc_percpu(u32);
1999	if (!dma_dom->next_index)
2000		goto free_dma_dom;
2001
2002	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
2003	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2004	dma_dom->domain.flags = PD_DMA_OPS_MASK;
2005	dma_dom->domain.priv = dma_dom;
2006	if (!dma_dom->domain.pt_root)
2007		goto free_dma_dom;
2008
2009	add_domain_to_list(&dma_dom->domain);
2010
2011	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
2012		goto free_dma_dom;
2013
2014	/*
2015	 * Mark the first page as allocated so we never return 0 as
2016	 * a valid dma-address; this allows us to use 0 as an error value.
2017	 */
2018	dma_dom->aperture[0]->bitmap[0] = 1;
2019
2020	for_each_possible_cpu(cpu)
2021		*per_cpu_ptr(dma_dom->next_index, cpu) = 0;
2022
2023	return dma_dom;
2024
2025free_dma_dom:
2026	dma_ops_domain_free(dma_dom);
2027
2028	return NULL;
2029}
2030
2031/*
2032 * little helper function to check whether a given protection domain is a
2033 * dma_ops domain
2034 */
2035static bool dma_ops_domain(struct protection_domain *domain)
2036{
2037	return domain->flags & PD_DMA_OPS_MASK;
2038}
2039
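/*
 * Write the device table entry (DTE) for a device: data[0] takes the
 * page-table root, the paging mode and the permission/valid bits (plus
 * the GV/GLX bits for IOMMUv2 domains), data[1] takes the domain id in
 * its low 16 bits, the IOTLB-enable flag and the remaining parts of the
 * GCR3 table pointer.
 */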
2040static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
2041{
2042	u64 pte_root = 0;
2043	u64 flags = 0;
2044
2045	if (domain->mode != PAGE_MODE_NONE)
2046		pte_root = virt_to_phys(domain->pt_root);
2047
2048	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
2049		    << DEV_ENTRY_MODE_SHIFT;
2050	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
2051
2052	flags = amd_iommu_dev_table[devid].data[1];
2053
2054	if (ats)
2055		flags |= DTE_FLAG_IOTLB;
2056
2057	if (domain->flags & PD_IOMMUV2_MASK) {
2058		u64 gcr3 = __pa(domain->gcr3_tbl);
2059		u64 glx  = domain->glx;
2060		u64 tmp;
2061
2062		pte_root |= DTE_FLAG_GV;
2063		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
2064
2065		/* First mask out possible old values for GCR3 table */
2066		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
2067		flags    &= ~tmp;
2068
2069		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
2070		flags    &= ~tmp;
2071
2072		/* Encode GCR3 table into DTE */
2073		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
2074		pte_root |= tmp;
2075
2076		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
2077		flags    |= tmp;
2078
2079		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
2080		flags    |= tmp;
2081	}
2082
2083	flags &= ~(0xffffUL);
2084	flags |= domain->id;
2085
2086	amd_iommu_dev_table[devid].data[1]  = flags;
2087	amd_iommu_dev_table[devid].data[0]  = pte_root;
2088}
2089
2090static void clear_dte_entry(u16 devid)
2091{
2092	/* remove entry from the device table seen by the hardware */
2093	amd_iommu_dev_table[devid].data[0]  = IOMMU_PTE_P | IOMMU_PTE_TV;
2094	amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
2095
2096	amd_iommu_apply_erratum_63(devid);
2097}
2098
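/*
 * Install the domain's DTE for a device and - if it differs - for its
 * PCI alias, update the per-IOMMU and per-domain reference counts and
 * flush the DTE so the hardware sees the new entry.
 */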
2099static void do_attach(struct iommu_dev_data *dev_data,
2100		      struct protection_domain *domain)
2101{
2102	struct amd_iommu *iommu;
2103	u16 alias;
2104	bool ats;
2105
2106	iommu = amd_iommu_rlookup_table[dev_data->devid];
2107	alias = dev_data->alias;
2108	ats   = dev_data->ats.enabled;
2109
2110	/* Update data structures */
2111	dev_data->domain = domain;
2112	list_add(&dev_data->list, &domain->dev_list);
2113
2114	/* Do reference counting */
2115	domain->dev_iommu[iommu->index] += 1;
2116	domain->dev_cnt                 += 1;
2117
2118	/* Update device table */
2119	set_dte_entry(dev_data->devid, domain, ats);
2120	if (alias != dev_data->devid)
2121		set_dte_entry(alias, domain, ats);
2122
2123	device_flush_dte(dev_data);
2124}
2125
2126static void do_detach(struct iommu_dev_data *dev_data)
2127{
2128	struct amd_iommu *iommu;
2129	u16 alias;
2130
2131	/*
2132	 * First check if the device is still attached. It might already
2133	 * be detached from its domain because the generic
2134	 * iommu_detach_group code detached it and we try again here in
2135	 * our alias handling.
2136	 */
2137	if (!dev_data->domain)
2138		return;
2139
2140	iommu = amd_iommu_rlookup_table[dev_data->devid];
2141	alias = dev_data->alias;
2142
2143	/* decrease reference counters */
2144	dev_data->domain->dev_iommu[iommu->index] -= 1;
2145	dev_data->domain->dev_cnt                 -= 1;
2146
2147	/* Update data structures */
2148	dev_data->domain = NULL;
2149	list_del(&dev_data->list);
2150	clear_dte_entry(dev_data->devid);
2151	if (alias != dev_data->devid)
2152		clear_dte_entry(alias);
2153
2154	/* Flush the DTE entry */
2155	device_flush_dte(dev_data);
2156}
2157
2158/*
2159 * If a device is not yet associated with a domain, this function
2160 * attaches it to the domain and makes it visible to the hardware
2161 */
2162static int __attach_device(struct iommu_dev_data *dev_data,
2163			   struct protection_domain *domain)
2164{
2165	int ret;
2166
2167	/*
2168	 * Must be called with IRQs disabled. Warn here to detect early
2169	 * when its not.
2170	 */
2171	WARN_ON(!irqs_disabled());
2172
2173	/* lock domain */
2174	spin_lock(&domain->lock);
2175
2176	ret = -EBUSY;
2177	if (dev_data->domain != NULL)
2178		goto out_unlock;
2179
2180	/* Attach alias group root */
2181	do_attach(dev_data, domain);
2182
2183	ret = 0;
2184
2185out_unlock:
2186
2187	/* ready */
2188	spin_unlock(&domain->lock);
2189
2190	return ret;
2191}
2192
2193
2194static void pdev_iommuv2_disable(struct pci_dev *pdev)
2195{
2196	pci_disable_ats(pdev);
2197	pci_disable_pri(pdev);
2198	pci_disable_pasid(pdev);
2199}
2200
2201/* FIXME: Change generic reset-function to do the same */
2202static int pri_reset_while_enabled(struct pci_dev *pdev)
2203{
2204	u16 control;
2205	int pos;
2206
2207	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2208	if (!pos)
2209		return -EINVAL;
2210
2211	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
2212	control |= PCI_PRI_CTRL_RESET;
2213	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
2214
2215	return 0;
2216}
2217
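/*
 * Enable the PCIe features a device needs to take part in IOMMUv2
 * operation: PASID is enabled first, then PRI is reset and enabled
 * (with an additional reset-while-enabled for devices carrying the
 * corresponding erratum), and finally ATS. On failure everything that
 * was already enabled is switched off again.
 */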
2218static int pdev_iommuv2_enable(struct pci_dev *pdev)
2219{
2220	bool reset_enable;
2221	int reqs, ret;
2222
2223	/* FIXME: Hardcode number of outstanding requests for now */
2224	reqs = 32;
2225	if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
2226		reqs = 1;
2227	reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
2228
2229	/* Only allow access to user-accessible pages */
2230	ret = pci_enable_pasid(pdev, 0);
2231	if (ret)
2232		goto out_err;
2233
2234	/* First reset the PRI state of the device */
2235	ret = pci_reset_pri(pdev);
2236	if (ret)
2237		goto out_err;
2238
2239	/* Enable PRI */
2240	ret = pci_enable_pri(pdev, reqs);
2241	if (ret)
2242		goto out_err;
2243
2244	if (reset_enable) {
2245		ret = pri_reset_while_enabled(pdev);
2246		if (ret)
2247			goto out_err;
2248	}
2249
2250	ret = pci_enable_ats(pdev, PAGE_SHIFT);
2251	if (ret)
2252		goto out_err;
2253
2254	return 0;
2255
2256out_err:
2257	pci_disable_pri(pdev);
2258	pci_disable_pasid(pdev);
2259
2260	return ret;
2261}
2262
2263/* FIXME: Move this to PCI code */
2264#define PCI_PRI_TLP_OFF		(1 << 15)
2265
2266static bool pci_pri_tlp_required(struct pci_dev *pdev)
2267{
2268	u16 status;
2269	int pos;
2270
2271	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2272	if (!pos)
2273		return false;
2274
2275	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
2276
2277	return (status & PCI_PRI_TLP_OFF) ? true : false;
2278}
2279
2280/*
2281 * If a device is not yet associated with a domain, this function
2282 * attaches it to the domain and makes it visible to the hardware
2283 */
2284static int attach_device(struct device *dev,
2285			 struct protection_domain *domain)
2286{
2287	struct pci_dev *pdev = to_pci_dev(dev);
2288	struct iommu_dev_data *dev_data;
2289	unsigned long flags;
2290	int ret;
2291
2292	dev_data = get_dev_data(dev);
2293
2294	if (domain->flags & PD_IOMMUV2_MASK) {
2295		if (!dev_data->passthrough)
2296			return -EINVAL;
2297
2298		if (dev_data->iommu_v2) {
2299			if (pdev_iommuv2_enable(pdev) != 0)
2300				return -EINVAL;
2301
2302			dev_data->ats.enabled = true;
2303			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2304			dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
2305		}
2306	} else if (amd_iommu_iotlb_sup &&
2307		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2308		dev_data->ats.enabled = true;
2309		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2310	}
2311
2312	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2313	ret = __attach_device(dev_data, domain);
2314	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2315
2316	/*
2317	 * We might boot into a crash-kernel here. The crashed kernel
2318	 * left the caches in the IOMMU dirty. So we have to flush
2319	 * here to evict all dirty stuff.
2320	 */
2321	domain_flush_tlb_pde(domain);
2322
2323	return ret;
2324}
2325
2326/*
2327 * Removes a device from a protection domain (unlocked)
2328 */
2329static void __detach_device(struct iommu_dev_data *dev_data)
2330{
2331	struct protection_domain *domain;
2332
2333	/*
2334	 * Must be called with IRQs disabled. Warn here to detect early
2335	 * when it's not.
2336	 */
2337	WARN_ON(!irqs_disabled());
2338
2339	if (WARN_ON(!dev_data->domain))
2340		return;
2341
2342	domain = dev_data->domain;
2343
2344	spin_lock(&domain->lock);
2345
2346	do_detach(dev_data);
2347
2348	spin_unlock(&domain->lock);
2349}
2350
2351/*
2352 * Removes a device from a protection domain (with devtable_lock held)
2353 */
2354static void detach_device(struct device *dev)
2355{
2356	struct protection_domain *domain;
2357	struct iommu_dev_data *dev_data;
2358	unsigned long flags;
2359
2360	dev_data = get_dev_data(dev);
2361	domain   = dev_data->domain;
2362
2363	/* lock device table */
2364	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2365	__detach_device(dev_data);
2366	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2367
2368	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
2369		pdev_iommuv2_disable(to_pci_dev(dev));
2370	else if (dev_data->ats.enabled)
2371		pci_disable_ats(to_pci_dev(dev));
2372
2373	dev_data->ats.enabled = false;
2374}
2375
2376static int amd_iommu_add_device(struct device *dev)
2377{
2378	struct iommu_dev_data *dev_data;
2379	struct iommu_domain *domain;
2380	struct amd_iommu *iommu;
2381	u16 devid;
2382	int ret;
2383
2384	if (!check_device(dev) || get_dev_data(dev))
2385		return 0;
2386
2387	devid = get_device_id(dev);
2388	iommu = amd_iommu_rlookup_table[devid];
2389
2390	ret = iommu_init_device(dev);
2391	if (ret) {
2392		if (ret != -ENOTSUPP)
2393			pr_err("Failed to initialize device %s - trying to proceed anyway\n",
2394				dev_name(dev));
2395
2396		iommu_ignore_device(dev);
2397		dev->archdata.dma_ops = &nommu_dma_ops;
2398		goto out;
2399	}
2400	init_iommu_group(dev);
2401
2402	dev_data = get_dev_data(dev);
2403
2404	BUG_ON(!dev_data);
2405
2406	if (iommu_pass_through || dev_data->iommu_v2)
2407		iommu_request_dm_for_dev(dev);
2408
2409	/* Domains are initialized for this device - have a look at what we ended up with */
2410	domain = iommu_get_domain_for_dev(dev);
2411	if (domain->type == IOMMU_DOMAIN_IDENTITY)
2412		dev_data->passthrough = true;
2413	else
2414		dev->archdata.dma_ops = &amd_iommu_dma_ops;
2415
2416out:
2417	iommu_completion_wait(iommu);
2418
2419	return 0;
2420}
2421
2422static void amd_iommu_remove_device(struct device *dev)
2423{
2424	struct amd_iommu *iommu;
2425	u16 devid;
2426
2427	if (!check_device(dev))
2428		return;
2429
2430	devid = get_device_id(dev);
2431	iommu = amd_iommu_rlookup_table[devid];
2432
2433	iommu_uninit_device(dev);
2434	iommu_completion_wait(iommu);
2435}
2436
2437/*****************************************************************************
2438 *
2439 * The next functions belong to the dma_ops mapping/unmapping code.
2440 *
2441 *****************************************************************************/
2442
2443/*
2444 * In the dma_ops path we only have the struct device. This function
2445 * finds the corresponding IOMMU, the protection domain and the
2446 * requestor id for a given device.
2447 * If the device is not yet associated with a domain this is also done
2448 * in this function.
2449 */
2450static struct protection_domain *get_domain(struct device *dev)
2451{
2452	struct protection_domain *domain;
2453	struct iommu_domain *io_domain;
2454
2455	if (!check_device(dev))
2456		return ERR_PTR(-EINVAL);
2457
2458	io_domain = iommu_get_domain_for_dev(dev);
2459	if (!io_domain)
2460		return NULL;
2461
2462	domain = to_pdomain(io_domain);
2463	if (!dma_ops_domain(domain))
2464		return ERR_PTR(-EBUSY);
2465
2466	return domain;
2467}
2468
2469static void update_device_table(struct protection_domain *domain)
2470{
2471	struct iommu_dev_data *dev_data;
2472
2473	list_for_each_entry(dev_data, &domain->dev_list, list)
2474		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
2475}
2476
2477static void update_domain(struct protection_domain *domain)
2478{
2479	if (!domain->updated)
2480		return;
2481
2482	update_device_table(domain);
2483
2484	domain_flush_devices(domain);
2485	domain_flush_tlb_pde(domain);
2486
2487	domain->updated = false;
2488}
2489
2490/*
2491 * This function fetches the PTE for a given address in the aperture
2492 */
2493static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
2494			    unsigned long address)
2495{
2496	struct aperture_range *aperture;
2497	u64 *pte, *pte_page;
2498
2499	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2500	if (!aperture)
2501		return NULL;
2502
2503	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2504	if (!pte) {
2505		pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
2506				GFP_ATOMIC);
2507		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
2508	} else
2509		pte += PM_LEVEL_INDEX(0, address);
2510
2511	update_domain(&dom->domain);
2512
2513	return pte;
2514}
2515
2516/*
2517 * This is the generic map function. It maps one 4kb page at paddr to
2518 * the given address in the DMA address space for the domain.
2519 */
2520static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
2521				     unsigned long address,
2522				     phys_addr_t paddr,
2523				     int direction)
2524{
2525	u64 *pte, __pte;
2526
2527	WARN_ON(address > dom->aperture_size);
2528
2529	paddr &= PAGE_MASK;
2530
2531	pte  = dma_ops_get_pte(dom, address);
2532	if (!pte)
2533		return DMA_ERROR_CODE;
2534
2535	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
2536
2537	if (direction == DMA_TO_DEVICE)
2538		__pte |= IOMMU_PTE_IR;
2539	else if (direction == DMA_FROM_DEVICE)
2540		__pte |= IOMMU_PTE_IW;
2541	else if (direction == DMA_BIDIRECTIONAL)
2542		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
2543
2544	WARN_ON_ONCE(*pte);
2545
2546	*pte = __pte;
2547
2548	return (dma_addr_t)address;
2549}
2550
2551/*
2552 * The generic unmapping function for one page in the DMA address space.
2553 */
2554static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
2555				 unsigned long address)
2556{
2557	struct aperture_range *aperture;
2558	u64 *pte;
2559
2560	if (address >= dom->aperture_size)
2561		return;
2562
2563	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2564	if (!aperture)
2565		return;
2566
2567	pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2568	if (!pte)
2569		return;
2570
2571	pte += PM_LEVEL_INDEX(0, address);
2572
2573	WARN_ON_ONCE(!*pte);
2574
2575	*pte = 0ULL;
2576}
2577
2578/*
2579 * This function contains common code for mapping of a physically
2580 * contiguous memory region into DMA address space. It is used by all
2581 * mapping functions provided with this IOMMU driver.
2582 * Must be called with the domain lock held.
2583 */
2584static dma_addr_t __map_single(struct device *dev,
2585			       struct dma_ops_domain *dma_dom,
2586			       phys_addr_t paddr,
2587			       size_t size,
2588			       int dir,
2589			       bool align,
2590			       u64 dma_mask)
2591{
2592	dma_addr_t offset = paddr & ~PAGE_MASK;
2593	dma_addr_t address, start, ret;
2594	unsigned int pages;
2595	unsigned long align_mask = 0;
2596	int i;
2597
2598	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
2599	paddr &= PAGE_MASK;
2600
2601	INC_STATS_COUNTER(total_map_requests);
2602
2603	if (pages > 1)
2604		INC_STATS_COUNTER(cross_page);
2605
2606	if (align)
2607		align_mask = (1UL << get_order(size)) - 1;
2608
2609	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
2610					  dma_mask);
2611
2612	if (address == DMA_ERROR_CODE)
2613		goto out;
2614
2615	start = address;
2616	for (i = 0; i < pages; ++i) {
2617		ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
2618		if (ret == DMA_ERROR_CODE)
2619			goto out_unmap;
2620
2621		paddr += PAGE_SIZE;
2622		start += PAGE_SIZE;
2623	}
2624	address += offset;
2625
2626	ADD_STATS_COUNTER(alloced_io_mem, size);
2627
2628	if (unlikely(amd_iommu_np_cache)) {
2629		domain_flush_pages(&dma_dom->domain, address, size);
2630		domain_flush_complete(&dma_dom->domain);
2631	}
2632
2633out:
2634	return address;
2635
2636out_unmap:
2637
2638	for (--i; i >= 0; --i) {
2639		start -= PAGE_SIZE;
2640		dma_ops_domain_unmap(dma_dom, start);
2641	}
2642
2643	dma_ops_free_addresses(dma_dom, address, pages);
2644
2645	return DMA_ERROR_CODE;
2646}
2647
2648/*
2649 * Does the reverse of the __map_single function. Must be called with
2650 * the domain lock held too
2651 */
2652static void __unmap_single(struct dma_ops_domain *dma_dom,
2653			   dma_addr_t dma_addr,
2654			   size_t size,
2655			   int dir)
2656{
2657	dma_addr_t flush_addr;
2658	dma_addr_t i, start;
2659	unsigned int pages;
2660
2661	if ((dma_addr == DMA_ERROR_CODE) ||
2662	    (dma_addr + size > dma_dom->aperture_size))
2663		return;
2664
2665	flush_addr = dma_addr;
2666	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
2667	dma_addr &= PAGE_MASK;
2668	start = dma_addr;
2669
2670	for (i = 0; i < pages; ++i) {
2671		dma_ops_domain_unmap(dma_dom, start);
2672		start += PAGE_SIZE;
2673	}
2674
2675	SUB_STATS_COUNTER(alloced_io_mem, size);
2676
2677	dma_ops_free_addresses(dma_dom, dma_addr, pages);
2678}
2679
2680/*
2681 * The exported map_single function for dma_ops.
2682 */
2683static dma_addr_t map_page(struct device *dev, struct page *page,
2684			   unsigned long offset, size_t size,
2685			   enum dma_data_direction dir,
2686			   struct dma_attrs *attrs)
2687{
2688	phys_addr_t paddr = page_to_phys(page) + offset;
2689	struct protection_domain *domain;
2690	u64 dma_mask;
2691
2692	INC_STATS_COUNTER(cnt_map_single);
2693
2694	domain = get_domain(dev);
2695	if (PTR_ERR(domain) == -EINVAL)
2696		return (dma_addr_t)paddr;
2697	else if (IS_ERR(domain))
2698		return DMA_ERROR_CODE;
2699
2700	dma_mask = *dev->dma_mask;
2701
2702	return __map_single(dev, domain->priv, paddr, size, dir, false,
2703			    dma_mask);
2704}
2705
2706/*
2707 * The exported unmap_single function for dma_ops.
2708 */
2709static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2710		       enum dma_data_direction dir, struct dma_attrs *attrs)
2711{
2712	struct protection_domain *domain;
2713
2714	INC_STATS_COUNTER(cnt_unmap_single);
2715
2716	domain = get_domain(dev);
2717	if (IS_ERR(domain))
2718		return;
2719
2720	__unmap_single(domain->priv, dma_addr, size, dir);
2721}
2722
2723/*
2724 * The exported map_sg function for dma_ops (handles scatter-gather
2725 * lists).
2726 */
2727static int map_sg(struct device *dev, struct scatterlist *sglist,
2728		  int nelems, enum dma_data_direction dir,
2729		  struct dma_attrs *attrs)
2730{
2731	struct protection_domain *domain;
2732	int i;
2733	struct scatterlist *s;
2734	phys_addr_t paddr;
2735	int mapped_elems = 0;
2736	u64 dma_mask;
2737
2738	INC_STATS_COUNTER(cnt_map_sg);
2739
2740	domain = get_domain(dev);
2741	if (IS_ERR(domain))
2742		return 0;
2743
2744	dma_mask = *dev->dma_mask;
2745
2746	for_each_sg(sglist, s, nelems, i) {
2747		paddr = sg_phys(s);
2748
2749		s->dma_address = __map_single(dev, domain->priv,
2750					      paddr, s->length, dir, false,
2751					      dma_mask);
2752
2753		if (s->dma_address) {
2754			s->dma_length = s->length;
2755			mapped_elems++;
2756		} else
2757			goto unmap;
2758	}
2759
2760	return mapped_elems;
2761
2762unmap:
2763	for_each_sg(sglist, s, mapped_elems, i) {
2764		if (s->dma_address)
2765			__unmap_single(domain->priv, s->dma_address,
2766				       s->dma_length, dir);
2767		s->dma_address = s->dma_length = 0;
2768	}
2769
2770	return 0;
2771}
2772
2773/*
2774 * The exported unmap_sg function for dma_ops (handles scatter-gather
2775 * lists).
2776 */
2777static void unmap_sg(struct device *dev, struct scatterlist *sglist,
2778		     int nelems, enum dma_data_direction dir,
2779		     struct dma_attrs *attrs)
2780{
2781	struct protection_domain *domain;
2782	struct scatterlist *s;
2783	int i;
2784
2785	INC_STATS_COUNTER(cnt_unmap_sg);
2786
2787	domain = get_domain(dev);
2788	if (IS_ERR(domain))
2789		return;
2790
2791	for_each_sg(sglist, s, nelems, i) {
2792		__unmap_single(domain->priv, s->dma_address,
2793			       s->dma_length, dir);
2794		s->dma_address = s->dma_length = 0;
2795	}
2796}
2797
2798/*
2799 * The exported alloc_coherent function for dma_ops.
2800 */
2801static void *alloc_coherent(struct device *dev, size_t size,
2802			    dma_addr_t *dma_addr, gfp_t flag,
2803			    struct dma_attrs *attrs)
2804{
2805	u64 dma_mask = dev->coherent_dma_mask;
2806	struct protection_domain *domain;
2807	struct page *page;
2808
2809	INC_STATS_COUNTER(cnt_alloc_coherent);
2810
2811	domain = get_domain(dev);
2812	if (PTR_ERR(domain) == -EINVAL) {
2813		page = alloc_pages(flag, get_order(size));
2814		*dma_addr = page_to_phys(page);
2815		return page_address(page);
2816	} else if (IS_ERR(domain))
2817		return NULL;
2818
2819	size	  = PAGE_ALIGN(size);
2820	dma_mask  = dev->coherent_dma_mask;
2821	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2822	flag     |= __GFP_ZERO;
2823
2824	page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
2825	if (!page) {
2826		if (!gfpflags_allow_blocking(flag))
2827			return NULL;
2828
2829		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
2830						 get_order(size));
2831		if (!page)
2832			return NULL;
2833	}
2834
2835	if (!dma_mask)
2836		dma_mask = *dev->dma_mask;
2837
2838	*dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
2839				 size, DMA_BIDIRECTIONAL, true, dma_mask);
2840
2841	if (*dma_addr == DMA_ERROR_CODE)
2842		goto out_free;
2843
2844	return page_address(page);
2845
2846out_free:
2847
2848	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2849		__free_pages(page, get_order(size));
2850
2851	return NULL;
2852}
2853
2854/*
2855 * The exported free_coherent function for dma_ops.
2856 */
2857static void free_coherent(struct device *dev, size_t size,
2858			  void *virt_addr, dma_addr_t dma_addr,
2859			  struct dma_attrs *attrs)
2860{
2861	struct protection_domain *domain;
2862	struct page *page;
2863
2864	INC_STATS_COUNTER(cnt_free_coherent);
2865
2866	page = virt_to_page(virt_addr);
2867	size = PAGE_ALIGN(size);
2868
2869	domain = get_domain(dev);
2870	if (IS_ERR(domain))
2871		goto free_mem;
2872
2873	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
2874
2875free_mem:
2876	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2877		__free_pages(page, get_order(size));
2878}
2879
2880/*
2881 * This function is called by the DMA layer to find out if we can handle a
2882 * particular device. It is part of the dma_ops.
2883 */
2884static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2885{
2886	return check_device(dev);
2887}
2888
2889static int set_dma_mask(struct device *dev, u64 mask)
2890{
2891	struct protection_domain *domain;
2892	int max_apertures = 1;
2893
2894	domain = get_domain(dev);
2895	if (IS_ERR(domain))
2896		return PTR_ERR(domain);
2897
2898	if (mask == DMA_BIT_MASK(64))
2899		max_apertures = 8;
2900	else if (mask > DMA_BIT_MASK(32))
2901		max_apertures = 4;
2902
2903	/*
2904	 * To prevent lock contention it doesn't make sense to allocate more
2905	 * apertures than online cpus
2906	 */
2907	if (max_apertures > num_online_cpus())
2908		max_apertures = num_online_cpus();
2909
2910	if (dma_ops_domain_alloc_apertures(domain->priv, max_apertures))
2911		dev_err(dev, "Can't allocate %d iommu apertures\n",
2912			max_apertures);
2913
2914	return 0;
2915}
2916
2917static struct dma_map_ops amd_iommu_dma_ops = {
2918	.alloc		= alloc_coherent,
2919	.free		= free_coherent,
2920	.map_page	= map_page,
2921	.unmap_page	= unmap_page,
2922	.map_sg		= map_sg,
2923	.unmap_sg	= unmap_sg,
2924	.dma_supported	= amd_iommu_dma_supported,
2925	.set_dma_mask	= set_dma_mask,
2926};
2927
2928int __init amd_iommu_init_api(void)
2929{
2930	return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
2931}
2932
2933int __init amd_iommu_init_dma_ops(void)
2934{
2935	swiotlb        = iommu_pass_through ? 1 : 0;
2936	iommu_detected = 1;
2937
2938	/*
2939	 * In case we don't initialize SWIOTLB (actually the common case
2940	 * when AMD IOMMU is enabled), make sure there are global
2941	 * dma_ops set as a fall-back for devices not handled by this
2942	 * driver (for example non-PCI devices).
2943	 */
2944	if (!swiotlb)
2945		dma_ops = &nommu_dma_ops;
2946
2947	amd_iommu_stats_init();
2948
2949	if (amd_iommu_unmap_flush)
2950		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
2951	else
2952		pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
2953
2954	return 0;
2955}
2956
2957/*****************************************************************************
2958 *
2959 * The following functions belong to the exported interface of AMD IOMMU
2960 *
2961 * This interface allows access to lower level functions of the IOMMU
2962 * like protection domain handling and assignment of devices to domains
2963 * which is not possible with the dma_ops interface.
2964 *
2965 *****************************************************************************/
2966
2967static void cleanup_domain(struct protection_domain *domain)
2968{
2969	struct iommu_dev_data *entry;
2970	unsigned long flags;
2971
2972	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2973
2974	while (!list_empty(&domain->dev_list)) {
2975		entry = list_first_entry(&domain->dev_list,
2976					 struct iommu_dev_data, list);
2977		__detach_device(entry);
2978	}
2979
2980	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2981}
2982
2983static void protection_domain_free(struct protection_domain *domain)
2984{
2985	if (!domain)
2986		return;
2987
2988	del_domain_from_list(domain);
2989
2990	if (domain->id)
2991		domain_id_free(domain->id);
2992
2993	kfree(domain);
2994}
2995
2996static int protection_domain_init(struct protection_domain *domain)
2997{
2998	spin_lock_init(&domain->lock);
2999	mutex_init(&domain->api_lock);
3000	domain->id = domain_id_alloc();
3001	if (!domain->id)
3002		return -ENOMEM;
3003	INIT_LIST_HEAD(&domain->dev_list);
3004
3005	return 0;
3006}
3007
3008static struct protection_domain *protection_domain_alloc(void)
3009{
3010	struct protection_domain *domain;
3011
3012	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
3013	if (!domain)
3014		return NULL;
3015
3016	if (protection_domain_init(domain))
3017		goto out_err;
3018
3019	add_domain_to_list(domain);
3020
3021	return domain;
3022
3023out_err:
3024	kfree(domain);
3025
3026	return NULL;
3027}
3028
3029static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
3030{
3031	struct protection_domain *pdomain;
3032	struct dma_ops_domain *dma_domain;
3033
3034	switch (type) {
3035	case IOMMU_DOMAIN_UNMANAGED:
3036		pdomain = protection_domain_alloc();
3037		if (!pdomain)
3038			return NULL;
3039
3040		pdomain->mode    = PAGE_MODE_3_LEVEL;
3041		pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
3042		if (!pdomain->pt_root) {
3043			protection_domain_free(pdomain);
3044			return NULL;
3045		}
3046
3047		pdomain->domain.geometry.aperture_start = 0;
3048		pdomain->domain.geometry.aperture_end   = ~0ULL;
3049		pdomain->domain.geometry.force_aperture = true;
3050
3051		break;
3052	case IOMMU_DOMAIN_DMA:
3053		dma_domain = dma_ops_domain_alloc();
3054		if (!dma_domain) {
3055			pr_err("AMD-Vi: Failed to allocate DMA domain\n");
3056			return NULL;
3057		}
3058		pdomain = &dma_domain->domain;
3059		break;
3060	case IOMMU_DOMAIN_IDENTITY:
3061		pdomain = protection_domain_alloc();
3062		if (!pdomain)
3063			return NULL;
3064
3065		pdomain->mode = PAGE_MODE_NONE;
3066		break;
3067	default:
3068		return NULL;
3069	}
3070
3071	return &pdomain->domain;
3072}
3073
3074static void amd_iommu_domain_free(struct iommu_domain *dom)
3075{
3076	struct protection_domain *domain;
3077
3078	if (!dom)
3079		return;
3080
3081	domain = to_pdomain(dom);
3082
3083	if (domain->dev_cnt > 0)
3084		cleanup_domain(domain);
3085
3086	BUG_ON(domain->dev_cnt != 0);
3087
3088	if (domain->mode != PAGE_MODE_NONE)
3089		free_pagetable(domain);
3090
3091	if (domain->flags & PD_IOMMUV2_MASK)
3092		free_gcr3_table(domain);
3093
3094	protection_domain_free(domain);
3095}
3096
3097static void amd_iommu_detach_device(struct iommu_domain *dom,
3098				    struct device *dev)
3099{
3100	struct iommu_dev_data *dev_data = dev->archdata.iommu;
3101	struct amd_iommu *iommu;
3102	u16 devid;
3103
3104	if (!check_device(dev))
3105		return;
3106
3107	devid = get_device_id(dev);
3108
3109	if (dev_data->domain != NULL)
3110		detach_device(dev);
3111
3112	iommu = amd_iommu_rlookup_table[devid];
3113	if (!iommu)
3114		return;
3115
3116	iommu_completion_wait(iommu);
3117}
3118
3119static int amd_iommu_attach_device(struct iommu_domain *dom,
3120				   struct device *dev)
3121{
3122	struct protection_domain *domain = to_pdomain(dom);
3123	struct iommu_dev_data *dev_data;
3124	struct amd_iommu *iommu;
3125	int ret;
3126
3127	if (!check_device(dev))
3128		return -EINVAL;
3129
3130	dev_data = dev->archdata.iommu;
3131
3132	iommu = amd_iommu_rlookup_table[dev_data->devid];
3133	if (!iommu)
3134		return -EINVAL;
3135
3136	if (dev_data->domain)
3137		detach_device(dev);
3138
3139	ret = attach_device(dev, domain);
3140
3141	iommu_completion_wait(iommu);
3142
3143	return ret;
3144}
3145
3146static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
3147			 phys_addr_t paddr, size_t page_size, int iommu_prot)
3148{
3149	struct protection_domain *domain = to_pdomain(dom);
3150	int prot = 0;
3151	int ret;
3152
3153	if (domain->mode == PAGE_MODE_NONE)
3154		return -EINVAL;
3155
3156	if (iommu_prot & IOMMU_READ)
3157		prot |= IOMMU_PROT_IR;
3158	if (iommu_prot & IOMMU_WRITE)
3159		prot |= IOMMU_PROT_IW;
3160
3161	mutex_lock(&domain->api_lock);
3162	ret = iommu_map_page(domain, iova, paddr, prot, page_size);
3163	mutex_unlock(&domain->api_lock);
3164
3165	return ret;
3166}
3167
3168static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3169			   size_t page_size)
3170{
3171	struct protection_domain *domain = to_pdomain(dom);
3172	size_t unmap_size;
3173
3174	if (domain->mode == PAGE_MODE_NONE)
3175		return -EINVAL;
3176
3177	mutex_lock(&domain->api_lock);
3178	unmap_size = iommu_unmap_page(domain, iova, page_size);
3179	mutex_unlock(&domain->api_lock);
3180
3181	domain_flush_tlb_pde(domain);
3182
3183	return unmap_size;
3184}
3185
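/*
 * Translate an IOVA back to a physical address by walking the page
 * table. For large mappings the page size returned by fetch_pte() is
 * used to keep the in-page offset, e.g. a 2MB PTE keeps the low 21
 * bits of the IOVA.
 */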
3186static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
3187					  dma_addr_t iova)
3188{
3189	struct protection_domain *domain = to_pdomain(dom);
3190	unsigned long offset_mask, pte_pgsize;
3191	u64 *pte, __pte;
3192
3193	if (domain->mode == PAGE_MODE_NONE)
3194		return iova;
3195
3196	pte = fetch_pte(domain, iova, &pte_pgsize);
3197
3198	if (!pte || !IOMMU_PTE_PRESENT(*pte))
3199		return 0;
3200
3201	offset_mask = pte_pgsize - 1;
3202	__pte	    = *pte & PM_ADDR_MASK;
3203
3204	return (__pte & ~offset_mask) | (iova & offset_mask);
3205}
3206
3207static bool amd_iommu_capable(enum iommu_cap cap)
3208{
3209	switch (cap) {
3210	case IOMMU_CAP_CACHE_COHERENCY:
3211		return true;
3212	case IOMMU_CAP_INTR_REMAP:
3213		return (irq_remapping_enabled == 1);
3214	case IOMMU_CAP_NOEXEC:
3215		return false;
3216	}
3217
3218	return false;
3219}
3220
3221static void amd_iommu_get_dm_regions(struct device *dev,
3222				     struct list_head *head)
3223{
3224	struct unity_map_entry *entry;
3225	u16 devid;
3226
3227	devid = get_device_id(dev);
3228
3229	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
3230		struct iommu_dm_region *region;
3231
3232		if (devid < entry->devid_start || devid > entry->devid_end)
3233			continue;
3234
3235		region = kzalloc(sizeof(*region), GFP_KERNEL);
3236		if (!region) {
3237			pr_err("Out of memory allocating dm-regions for %s\n",
3238				dev_name(dev));
3239			return;
3240		}
3241
3242		region->start = entry->address_start;
3243		region->length = entry->address_end - entry->address_start;
3244		if (entry->prot & IOMMU_PROT_IR)
3245			region->prot |= IOMMU_READ;
3246		if (entry->prot & IOMMU_PROT_IW)
3247			region->prot |= IOMMU_WRITE;
3248
3249		list_add_tail(&region->list, head);
3250	}
3251}
3252
3253static void amd_iommu_put_dm_regions(struct device *dev,
3254				     struct list_head *head)
3255{
3256	struct iommu_dm_region *entry, *next;
3257
3258	list_for_each_entry_safe(entry, next, head, list)
3259		kfree(entry);
3260}
3261
3262static const struct iommu_ops amd_iommu_ops = {
3263	.capable = amd_iommu_capable,
3264	.domain_alloc = amd_iommu_domain_alloc,
3265	.domain_free  = amd_iommu_domain_free,
3266	.attach_dev = amd_iommu_attach_device,
3267	.detach_dev = amd_iommu_detach_device,
3268	.map = amd_iommu_map,
3269	.unmap = amd_iommu_unmap,
3270	.map_sg = default_iommu_map_sg,
3271	.iova_to_phys = amd_iommu_iova_to_phys,
3272	.add_device = amd_iommu_add_device,
3273	.remove_device = amd_iommu_remove_device,
3274	.device_group = pci_device_group,
3275	.get_dm_regions = amd_iommu_get_dm_regions,
3276	.put_dm_regions = amd_iommu_put_dm_regions,
3277	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
3278};
3279
3280/*****************************************************************************
3281 *
3282 * The next functions do a basic initialization of IOMMU for pass through
3283 * mode
3284 *
3285 * In passthrough mode the IOMMU is initialized and enabled but not used for
3286 * DMA-API translation.
3287 *
3288 *****************************************************************************/
3289
3290/* IOMMUv2 specific functions */
3291int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3292{
3293	return atomic_notifier_chain_register(&ppr_notifier, nb);
3294}
3295EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
3296
3297int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
3298{
3299	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
3300}
3301EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
3302
3303void amd_iommu_domain_direct_map(struct iommu_domain *dom)
3304{
3305	struct protection_domain *domain = to_pdomain(dom);
3306	unsigned long flags;
3307
3308	spin_lock_irqsave(&domain->lock, flags);
3309
3310	/* Update data structure */
3311	domain->mode    = PAGE_MODE_NONE;
3312	domain->updated = true;
3313
3314	/* Make changes visible to IOMMUs */
3315	update_domain(domain);
3316
3317	/* Page-table is not visible to IOMMU anymore, so free it */
3318	free_pagetable(domain);
3319
3320	spin_unlock_irqrestore(&domain->lock, flags);
3321}
3322EXPORT_SYMBOL(amd_iommu_domain_direct_map);
3323
3324int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
3325{
3326	struct protection_domain *domain = to_pdomain(dom);
3327	unsigned long flags;
3328	int levels, ret;
3329
3330	if (pasids <= 0 || pasids > (PASID_MASK + 1))
3331		return -EINVAL;
3332
3333	/* Number of GCR3 table levels required */
3334	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
3335		levels += 1;
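	/*
	 * Example: pasids == 512 fits into a single GCR3 table page
	 * (levels == 0), while pasids == 1 << 16 needs one additional
	 * table level (levels == 1).
	 */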
3336
3337	if (levels > amd_iommu_max_glx_val)
3338		return -EINVAL;
3339
3340	spin_lock_irqsave(&domain->lock, flags);
3341
3342	/*
3343	 * Spare us the sanity checks of whether every device already in
3344	 * the domain supports IOMMUv2 - just require that the domain has
3345	 * no devices attached when it is switched into IOMMUv2 mode.
3346	 */
3347	ret = -EBUSY;
3348	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
3349		goto out;
3350
3351	ret = -ENOMEM;
3352	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
3353	if (domain->gcr3_tbl == NULL)
3354		goto out;
3355
3356	domain->glx      = levels;
3357	domain->flags   |= PD_IOMMUV2_MASK;
3358	domain->updated  = true;
3359
3360	update_domain(domain);
3361
3362	ret = 0;
3363
3364out:
3365	spin_unlock_irqrestore(&domain->lock, flags);
3366
3367	return ret;
3368}
3369EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
3370
3371static int __flush_pasid(struct protection_domain *domain, int pasid,
3372			 u64 address, bool size)
3373{
3374	struct iommu_dev_data *dev_data;
3375	struct iommu_cmd cmd;
3376	int i, ret;
3377
3378	if (!(domain->flags & PD_IOMMUV2_MASK))
3379		return -EINVAL;
3380
3381	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
3382
3383	/*
3384	 * IOMMU TLB needs to be flushed before Device TLB to
3385	 * prevent device TLB refill from IOMMU TLB
3386	 */
3387	for (i = 0; i < amd_iommus_present; ++i) {
3388		if (domain->dev_iommu[i] == 0)
3389			continue;
3390
3391		ret = iommu_queue_command(amd_iommus[i], &cmd);
3392		if (ret != 0)
3393			goto out;
3394	}
3395
3396	/* Wait until IOMMU TLB flushes are complete */
3397	domain_flush_complete(domain);
3398
3399	/* Now flush device TLBs */
3400	list_for_each_entry(dev_data, &domain->dev_list, list) {
3401		struct amd_iommu *iommu;
3402		int qdep;
3403
3404		/*
3405		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
3406		 * domain.
3407		 */
3408		if (!dev_data->ats.enabled)
3409			continue;
3410
3411		qdep  = dev_data->ats.qdep;
3412		iommu = amd_iommu_rlookup_table[dev_data->devid];
3413
3414		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3415				      qdep, address, size);
3416
3417		ret = iommu_queue_command(iommu, &cmd);
3418		if (ret != 0)
3419			goto out;
3420	}
3421
3422	/* Wait until all device TLBs are flushed */
3423	domain_flush_complete(domain);
3424
3425	ret = 0;
3426
3427out:
3428
3429	return ret;
3430}
3431
3432static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
3433				  u64 address)
3434{
3435	INC_STATS_COUNTER(invalidate_iotlb);
3436
3437	return __flush_pasid(domain, pasid, address, false);
3438}
3439
3440int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
3441			 u64 address)
3442{
3443	struct protection_domain *domain = to_pdomain(dom);
3444	unsigned long flags;
3445	int ret;
3446
3447	spin_lock_irqsave(&domain->lock, flags);
3448	ret = __amd_iommu_flush_page(domain, pasid, address);
3449	spin_unlock_irqrestore(&domain->lock, flags);
3450
3451	return ret;
3452}
3453EXPORT_SYMBOL(amd_iommu_flush_page);
3454
3455static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
3456{
3457	INC_STATS_COUNTER(invalidate_iotlb_all);
3458
3459	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
3460			     true);
3461}
3462
3463int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
3464{
3465	struct protection_domain *domain = to_pdomain(dom);
3466	unsigned long flags;
3467	int ret;
3468
3469	spin_lock_irqsave(&domain->lock, flags);
3470	ret = __amd_iommu_flush_tlb(domain, pasid);
3471	spin_unlock_irqrestore(&domain->lock, flags);
3472
3473	return ret;
3474}
3475EXPORT_SYMBOL(amd_iommu_flush_tlb);
3476
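/*
 * Walk the GCR3 table for a given PASID, consuming nine bits of the
 * PASID per level, and return a pointer to the level-0 entry. Missing
 * intermediate levels are allocated when @alloc is true.
 */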
3477static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
3478{
3479	int index;
3480	u64 *pte;
3481
3482	while (true) {
3483
3484		index = (pasid >> (9 * level)) & 0x1ff;
3485		pte   = &root[index];
3486
3487		if (level == 0)
3488			break;
3489
3490		if (!(*pte & GCR3_VALID)) {
3491			if (!alloc)
3492				return NULL;
3493
3494			root = (void *)get_zeroed_page(GFP_ATOMIC);
3495			if (root == NULL)
3496				return NULL;
3497
3498			*pte = __pa(root) | GCR3_VALID;
3499		}
3500
3501		root = __va(*pte & PAGE_MASK);
3502
3503		level -= 1;
3504	}
3505
3506	return pte;
3507}
3508
3509static int __set_gcr3(struct protection_domain *domain, int pasid,
3510		      unsigned long cr3)
3511{
3512	u64 *pte;
3513
3514	if (domain->mode != PAGE_MODE_NONE)
3515		return -EINVAL;
3516
3517	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
3518	if (pte == NULL)
3519		return -ENOMEM;
3520
3521	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;
3522
3523	return __amd_iommu_flush_tlb(domain, pasid);
3524}
3525
3526static int __clear_gcr3(struct protection_domain *domain, int pasid)
3527{
3528	u64 *pte;
3529
3530	if (domain->mode != PAGE_MODE_NONE)
3531		return -EINVAL;
3532
3533	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
3534	if (pte == NULL)
3535		return 0;
3536
3537	*pte = 0;
3538
3539	return __amd_iommu_flush_tlb(domain, pasid);
3540}
3541
3542int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
3543			      unsigned long cr3)
3544{
3545	struct protection_domain *domain = to_pdomain(dom);
3546	unsigned long flags;
3547	int ret;
3548
3549	spin_lock_irqsave(&domain->lock, flags);
3550	ret = __set_gcr3(domain, pasid, cr3);
3551	spin_unlock_irqrestore(&domain->lock, flags);
3552
3553	return ret;
3554}
3555EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
3556
3557int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
3558{
3559	struct protection_domain *domain = to_pdomain(dom);
3560	unsigned long flags;
3561	int ret;
3562
3563	spin_lock_irqsave(&domain->lock, flags);
3564	ret = __clear_gcr3(domain, pasid);
3565	spin_unlock_irqrestore(&domain->lock, flags);
3566
3567	return ret;
3568}
3569EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
3570
3571int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
3572			   int status, int tag)
3573{
3574	struct iommu_dev_data *dev_data;
3575	struct amd_iommu *iommu;
3576	struct iommu_cmd cmd;
3577
3578	INC_STATS_COUNTER(complete_ppr);
3579
3580	dev_data = get_dev_data(&pdev->dev);
3581	iommu    = amd_iommu_rlookup_table[dev_data->devid];
3582
3583	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
3584			   tag, dev_data->pri_tlp);
3585
3586	return iommu_queue_command(iommu, &cmd);
3587}
3588EXPORT_SYMBOL(amd_iommu_complete_ppr);
3589
3590struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
3591{
3592	struct protection_domain *pdomain;
3593
3594	pdomain = get_domain(&pdev->dev);
3595	if (IS_ERR(pdomain))
3596		return NULL;
3597
3598	/* Only return IOMMUv2 domains */
3599	if (!(pdomain->flags & PD_IOMMUV2_MASK))
3600		return NULL;
3601
3602	return &pdomain->domain;
3603}
3604EXPORT_SYMBOL(amd_iommu_get_v2_domain);
3605
3606void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
3607{
3608	struct iommu_dev_data *dev_data;
3609
3610	if (!amd_iommu_v2_supported())
3611		return;
3612
3613	dev_data = get_dev_data(&pdev->dev);
3614	dev_data->errata |= (1 << erratum);
3615}
3616EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
3617
3618int amd_iommu_device_info(struct pci_dev *pdev,
3619                          struct amd_iommu_device_info *info)
3620{
3621	int max_pasids;
3622	int pos;
3623
3624	if (pdev == NULL || info == NULL)
3625		return -EINVAL;
3626
3627	if (!amd_iommu_v2_supported())
3628		return -EINVAL;
3629
3630	memset(info, 0, sizeof(*info));
3631
3632	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3633	if (pos)
3634		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3635
3636	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3637	if (pos)
3638		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
3639
3640	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
3641	if (pos) {
3642		int features;
3643
3644		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
3645		max_pasids = min(max_pasids, (1 << 20));
3646
3647		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
3648		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
3649
3650		features = pci_pasid_features(pdev);
3651		if (features & PCI_PASID_CAP_EXEC)
3652			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
3653		if (features & PCI_PASID_CAP_PRIV)
3654			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
3655	}
3656
3657	return 0;
3658}
3659EXPORT_SYMBOL(amd_iommu_device_info);
3660
3661#ifdef CONFIG_IRQ_REMAP
3662
3663/*****************************************************************************
3664 *
3665 * Interrupt Remapping Implementation
3666 *
3667 *****************************************************************************/
3668
3669union irte {
3670	u32 val;
3671	struct {
3672		u32 valid	: 1,
3673		    no_fault	: 1,
3674		    int_type	: 3,
3675		    rq_eoi	: 1,
3676		    dm		: 1,
3677		    rsvd_1	: 1,
3678		    destination	: 8,
3679		    vector	: 8,
3680		    rsvd_2	: 8;
3681	} fields;
3682};
3683
3684struct irq_2_irte {
3685	u16 devid; /* Device ID for IRTE table */
3686	u16 index; /* Index into IRTE table */
3687};
3688
3689struct amd_ir_data {
3690	struct irq_2_irte			irq_2_irte;
3691	union irte				irte_entry;
3692	union {
3693		struct msi_msg			msi_entry;
3694	};
3695};
3696
3697static struct irq_chip amd_ir_chip;
3698
3699#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
3700#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
3701#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
3702#define DTE_IRQ_REMAP_ENABLE    1ULL
3703
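/*
 * Point the interrupt-remapping part of a device table entry (data[2])
 * at the given remapping table and enable remapping for the device.
 */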
3704static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3705{
3706	u64 dte;
3707
3708	dte	= amd_iommu_dev_table[devid].data[2];
3709	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
3710	dte	|= virt_to_phys(table->table);
3711	dte	|= DTE_IRQ_REMAP_INTCTL;
3712	dte	|= DTE_IRQ_TABLE_LEN;
3713	dte	|= DTE_IRQ_REMAP_ENABLE;
3714
3715	amd_iommu_dev_table[devid].data[2] = dte;
3716}
3717
3718#define IRTE_ALLOCATED (~1U)
3719
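/*
 * Return the interrupt remapping table for a device, allocating and
 * publishing a new one if necessary. A device and its PCI alias always
 * share the same table. For IOAPICs the first 32 indexes are reserved.
 */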
3720static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3721{
3722	struct irq_remap_table *table = NULL;
3723	struct amd_iommu *iommu;
3724	unsigned long flags;
3725	u16 alias;
3726
3727	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3728
3729	iommu = amd_iommu_rlookup_table[devid];
3730	if (!iommu)
3731		goto out_unlock;
3732
3733	table = irq_lookup_table[devid];
3734	if (table)
3735		goto out;
3736
3737	alias = amd_iommu_alias_table[devid];
3738	table = irq_lookup_table[alias];
3739	if (table) {
3740		irq_lookup_table[devid] = table;
3741		set_dte_irq_entry(devid, table);
3742		iommu_flush_dte(iommu, devid);
3743		goto out;
3744	}
3745
3746	/* Nothing there yet, allocate new irq remapping table */
3747	table = kzalloc(sizeof(*table), GFP_ATOMIC);
3748	if (!table)
3749		goto out;
3750
3751	/* Initialize table spin-lock */
3752	spin_lock_init(&table->lock);
3753
3754	if (ioapic)
3755		/* Keep the first 32 indexes free for IOAPIC interrupts */
3756		table->min_index = 32;
3757
3758	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
3759	if (!table->table) {
3760		kfree(table);
3761		table = NULL;
3762		goto out;
3763	}
3764
3765	memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
3766
3767	if (ioapic) {
3768		int i;
3769
3770		for (i = 0; i < 32; ++i)
3771			table->table[i] = IRTE_ALLOCATED;
3772	}
3773
3774	irq_lookup_table[devid] = table;
3775	set_dte_irq_entry(devid, table);
3776	iommu_flush_dte(iommu, devid);
3777	if (devid != alias) {
3778		irq_lookup_table[alias] = table;
3779		set_dte_irq_entry(alias, table);
3780		iommu_flush_dte(iommu, alias);
3781	}
3782
3783out:
3784	iommu_completion_wait(iommu);
3785
3786out_unlock:
3787	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3788
3789	return table;
3790}
3791
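/*
 * Find 'count' consecutive free slots in the device's remapping table,
 * mark them as allocated and return the first index, or -ENOSPC if no
 * such run exists.
 */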
3792static int alloc_irq_index(u16 devid, int count)
3793{
3794	struct irq_remap_table *table;
3795	unsigned long flags;
3796	int index, c;
3797
3798	table = get_irq_table(devid, false);
3799	if (!table)
3800		return -ENODEV;
3801
3802	spin_lock_irqsave(&table->lock, flags);
3803
3804	/* Scan table for free entries */
3805	for (c = 0, index = table->min_index;
3806	     index < MAX_IRQS_PER_TABLE;
3807	     ++index) {
3808		if (table->table[index] == 0)
3809			c += 1;
3810		else
3811			c = 0;
3812
3813		if (c == count)	{
3814			for (; c != 0; --c)
3815				table->table[index - c + 1] = IRTE_ALLOCATED;
3816
3817			index -= count - 1;
3818			goto out;
3819		}
3820	}
3821
3822	index = -ENOSPC;
3823
3824out:
3825	spin_unlock_irqrestore(&table->lock, flags);
3826
3827	return index;
3828}
3829
3830static int modify_irte(u16 devid, int index, union irte irte)
3831{
3832	struct irq_remap_table *table;
3833	struct amd_iommu *iommu;
3834	unsigned long flags;
3835
3836	iommu = amd_iommu_rlookup_table[devid];
3837	if (iommu == NULL)
3838		return -EINVAL;
3839
3840	table = get_irq_table(devid, false);
3841	if (!table)
3842		return -ENOMEM;
3843
3844	spin_lock_irqsave(&table->lock, flags);
3845	table->table[index] = irte.val;
3846	spin_unlock_irqrestore(&table->lock, flags);
3847
3848	iommu_flush_irt(iommu, devid);
3849	iommu_completion_wait(iommu);
3850
3851	return 0;
3852}
3853
3854static void free_irte(u16 devid, int index)
3855{
3856	struct irq_remap_table *table;
3857	struct amd_iommu *iommu;
3858	unsigned long flags;
3859
3860	iommu = amd_iommu_rlookup_table[devid];
3861	if (iommu == NULL)
3862		return;
3863
3864	table = get_irq_table(devid, false);
3865	if (!table)
3866		return;
3867
3868	spin_lock_irqsave(&table->lock, flags);
3869	table->table[index] = 0;
3870	spin_unlock_irqrestore(&table->lock, flags);
3871
3872	iommu_flush_irt(iommu, devid);
3873	iommu_completion_wait(iommu);
3874}
3875
3876static int get_devid(struct irq_alloc_info *info)
3877{
3878	int devid = -1;
3879
3880	switch (info->type) {
3881	case X86_IRQ_ALLOC_TYPE_IOAPIC:
3882		devid     = get_ioapic_devid(info->ioapic_id);
3883		break;
3884	case X86_IRQ_ALLOC_TYPE_HPET:
3885		devid     = get_hpet_devid(info->hpet_id);
3886		break;
3887	case X86_IRQ_ALLOC_TYPE_MSI:
3888	case X86_IRQ_ALLOC_TYPE_MSIX:
3889		devid = get_device_id(&info->msi_dev->dev);
3890		break;
3891	default:
3892		BUG_ON(1);
3893		break;
3894	}
3895
3896	return devid;
3897}
3898
3899static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
3900{
3901	struct amd_iommu *iommu;
3902	int devid;
3903
3904	if (!info)
3905		return NULL;
3906
3907	devid = get_devid(info);
3908	if (devid >= 0) {
3909		iommu = amd_iommu_rlookup_table[devid];
3910		if (iommu)
3911			return iommu->ir_domain;
3912	}
3913
3914	return NULL;
3915}
3916
3917static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
3918{
3919	struct amd_iommu *iommu;
3920	int devid;
3921
3922	if (!info)
3923		return NULL;
3924
3925	switch (info->type) {
3926	case X86_IRQ_ALLOC_TYPE_MSI:
3927	case X86_IRQ_ALLOC_TYPE_MSIX:
3928		devid = get_device_id(&info->msi_dev->dev);
3929		iommu = amd_iommu_rlookup_table[devid];
3930		if (iommu)
3931			return iommu->msi_domain;
3932		break;
3933	default:
3934		break;
3935	}
3936
3937	return NULL;
3938}
3939
3940struct irq_remap_ops amd_iommu_irq_ops = {
3941	.prepare		= amd_iommu_prepare,
3942	.enable			= amd_iommu_enable,
3943	.disable		= amd_iommu_disable,
3944	.reenable		= amd_iommu_reenable,
3945	.enable_faulting	= amd_iommu_enable_faulting,
3946	.get_ir_irq_domain	= get_ir_irq_domain,
3947	.get_irq_domain		= get_irq_domain,
3948};
3949
3950static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3951				       struct irq_cfg *irq_cfg,
3952				       struct irq_alloc_info *info,
3953				       int devid, int index, int sub_handle)
3954{
3955	struct irq_2_irte *irte_info = &data->irq_2_irte;
3956	struct msi_msg *msg = &data->msi_entry;
3957	union irte *irte = &data->irte_entry;
3958	struct IO_APIC_route_entry *entry;
3959
3960	data->irq_2_irte.devid = devid;
3961	data->irq_2_irte.index = index + sub_handle;
3962
3963	/* Setup IRTE for IOMMU */
3964	irte->val = 0;
3965	irte->fields.vector      = irq_cfg->vector;
3966	irte->fields.int_type    = apic->irq_delivery_mode;
3967	irte->fields.destination = irq_cfg->dest_apicid;
3968	irte->fields.dm          = apic->irq_dest_mode;
3969	irte->fields.valid       = 1;
3970
3971	switch (info->type) {
3972	case X86_IRQ_ALLOC_TYPE_IOAPIC:
3973		/* Setup IOAPIC entry */
3974		entry = info->ioapic_entry;
3975		info->ioapic_entry = NULL;
3976		memset(entry, 0, sizeof(*entry));
3977		entry->vector        = index;
3978		entry->mask          = 0;
3979		entry->trigger       = info->ioapic_trigger;
3980		entry->polarity      = info->ioapic_polarity;
3981		/* Mask level triggered irqs. */
3982		if (info->ioapic_trigger)
3983			entry->mask = 1;
3984		break;
3985
3986	case X86_IRQ_ALLOC_TYPE_HPET:
3987	case X86_IRQ_ALLOC_TYPE_MSI:
3988	case X86_IRQ_ALLOC_TYPE_MSIX:
3989		msg->address_hi = MSI_ADDR_BASE_HI;
3990		msg->address_lo = MSI_ADDR_BASE_LO;
3991		msg->data = irte_info->index;
3992		break;
3993
3994	default:
3995		BUG_ON(1);
3996		break;
3997	}
3998}
3999
4000static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
4001			       unsigned int nr_irqs, void *arg)
4002{
4003	struct irq_alloc_info *info = arg;
4004	struct irq_data *irq_data;
4005	struct amd_ir_data *data;
4006	struct irq_cfg *cfg;
4007	int i, ret, devid;
4008	int index = -1;
4009
4010	if (!info)
4011		return -EINVAL;
4012	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
4013	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
4014		return -EINVAL;
4015
4016	/*
4017	 * With IRQ remapping enabled, we don't need contiguous CPU vectors
4018	 * to support multiple MSI interrupts.
4019	 */
4020	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
4021		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
4022
4023	devid = get_devid(info);
4024	if (devid < 0)
4025		return -EINVAL;
4026
4027	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
4028	if (ret < 0)
4029		return ret;
4030
4031	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
4032		if (get_irq_table(devid, true))
4033			index = info->ioapic_pin;
4034		else
4035			ret = -ENOMEM;
4036	} else {
4037		index = alloc_irq_index(devid, nr_irqs);
4038	}
4039	if (index < 0) {
4040		pr_warn("Failed to allocate IRTE\n");
4041		goto out_free_parent;
4042	}
4043
4044	for (i = 0; i < nr_irqs; i++) {
4045		irq_data = irq_domain_get_irq_data(domain, virq + i);
4046		cfg = irqd_cfg(irq_data);
4047		if (!irq_data || !cfg) {
4048			ret = -EINVAL;
4049			goto out_free_data;
4050		}
4051
4052		ret = -ENOMEM;
4053		data = kzalloc(sizeof(*data), GFP_KERNEL);
4054		if (!data)
4055			goto out_free_data;
4056
4057		irq_data->hwirq = (devid << 16) + i;
4058		irq_data->chip_data = data;
4059		irq_data->chip = &amd_ir_chip;
4060		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
4061		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
4062	}
4063
4064	return 0;
4065
4066out_free_data:
4067	for (i--; i >= 0; i--) {
4068		irq_data = irq_domain_get_irq_data(domain, virq + i);
4069		if (irq_data)
4070			kfree(irq_data->chip_data);
4071	}
4072	for (i = 0; i < nr_irqs; i++)
4073		free_irte(devid, index + i);
4074out_free_parent:
4075	irq_domain_free_irqs_common(domain, virq, nr_irqs);
4076	return ret;
4077}
4078
4079static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
4080			       unsigned int nr_irqs)
4081{
4082	struct irq_2_irte *irte_info;
4083	struct irq_data *irq_data;
4084	struct amd_ir_data *data;
4085	int i;
4086
4087	for (i = 0; i < nr_irqs; i++) {
4088		irq_data = irq_domain_get_irq_data(domain, virq  + i);
4089		if (irq_data && irq_data->chip_data) {
4090			data = irq_data->chip_data;
4091			irte_info = &data->irq_2_irte;
4092			free_irte(irte_info->devid, irte_info->index);
4093			kfree(data);
4094		}
4095	}
4096	irq_domain_free_irqs_common(domain, virq, nr_irqs);
4097}
4098
4099static void irq_remapping_activate(struct irq_domain *domain,
4100				   struct irq_data *irq_data)
4101{
4102	struct amd_ir_data *data = irq_data->chip_data;
4103	struct irq_2_irte *irte_info = &data->irq_2_irte;
 
4104
4105	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
 
 
4106}
4107
4108static void irq_remapping_deactivate(struct irq_domain *domain,
4109				     struct irq_data *irq_data)
4110{
4111	struct amd_ir_data *data = irq_data->chip_data;
4112	struct irq_2_irte *irte_info = &data->irq_2_irte;
4113	union irte entry;
4114
4115	entry.val = 0;
4116	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
 
4117}
4118
4119static struct irq_domain_ops amd_ir_domain_ops = {
4120	.alloc = irq_remapping_alloc,
4121	.free = irq_remapping_free,
4122	.activate = irq_remapping_activate,
4123	.deactivate = irq_remapping_deactivate,
4124};
4125
4126static int amd_ir_set_affinity(struct irq_data *data,
4127			       const struct cpumask *mask, bool force)
4128{
4129	struct amd_ir_data *ir_data = data->chip_data;
4130	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
4131	struct irq_cfg *cfg = irqd_cfg(data);
4132	struct irq_data *parent = data->parent_data;
 
4133	int ret;
4134
4135	ret = parent->chip->irq_set_affinity(parent, mask, force);
4136	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
4137		return ret;
4138
4139	/*
4140	 * Atomically updates the IRTE with the new destination, vector
4141	 * and flushes the interrupt entry cache.
4142	 */
4143	ir_data->irte_entry.fields.vector = cfg->vector;
4144	ir_data->irte_entry.fields.destination = cfg->dest_apicid;
4145	modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry);
4146
4147	/*
4148	 * After this point, all the interrupts will start arriving
4149	 * at the new destination. So, time to cleanup the previous
4150	 * vector allocation.
4151	 */
4152	send_cleanup_vector(cfg);
4153
4154	return IRQ_SET_MASK_OK_DONE;
4155}
4156
4157static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
4158{
4159	struct amd_ir_data *ir_data = irq_data->chip_data;
4160
4161	*msg = ir_data->msi_entry;
4162}
4163
4164static struct irq_chip amd_ir_chip = {
4165	.irq_ack = ir_ack_apic_edge,
4166	.irq_set_affinity = amd_ir_set_affinity,
 
4167	.irq_compose_msi_msg = ir_compose_msi_msg,
4168};
4169
4170int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
4171{
4172	iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu);
4173	if (!iommu->ir_domain)
4174		return -ENOMEM;
4175
4176	iommu->ir_domain->parent = arch_get_ir_parent_domain();
4177	iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
4178
4179	return 0;
4180}
4181#endif
v4.10.11
   1/*
   2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
   3 * Author: Joerg Roedel <jroedel@suse.de>
   4 *         Leo Duran <leo.duran@amd.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  18 */
  19
  20#include <linux/ratelimit.h>
  21#include <linux/pci.h>
  22#include <linux/acpi.h>
  23#include <linux/amba/bus.h>
  24#include <linux/platform_device.h>
  25#include <linux/pci-ats.h>
  26#include <linux/bitmap.h>
  27#include <linux/slab.h>
  28#include <linux/debugfs.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/iommu-helper.h>
  32#include <linux/iommu.h>
  33#include <linux/delay.h>
  34#include <linux/amd-iommu.h>
  35#include <linux/notifier.h>
  36#include <linux/export.h>
  37#include <linux/irq.h>
  38#include <linux/msi.h>
  39#include <linux/dma-contiguous.h>
  40#include <linux/irqdomain.h>
  41#include <linux/percpu.h>
  42#include <linux/iova.h>
  43#include <asm/irq_remapping.h>
  44#include <asm/io_apic.h>
  45#include <asm/apic.h>
  46#include <asm/hw_irq.h>
  47#include <asm/msidef.h>
  48#include <asm/proto.h>
  49#include <asm/iommu.h>
  50#include <asm/gart.h>
  51#include <asm/dma.h>
  52
  53#include "amd_iommu_proto.h"
  54#include "amd_iommu_types.h"
  55#include "irq_remapping.h"
  56
  57#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
  58
  59#define LOOP_TIMEOUT	100000
  60
  61/* IO virtual address start page frame number */
  62#define IOVA_START_PFN		(1)
  63#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
  64#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
  65
  66/* Reserved IOVA ranges */
  67#define MSI_RANGE_START		(0xfee00000)
  68#define MSI_RANGE_END		(0xfeefffff)
  69#define HT_RANGE_START		(0xfd00000000ULL)
  70#define HT_RANGE_END		(0xffffffffffULL)
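/*
 * Addresses in these windows are claimed by the interrupt/MSI logic
 * (0xfee00000-0xfeefffff) or fall into the HyperTransport reserved region
 * (0xfd00000000-0xffffffffff), so they must never be handed out as DMA
 * addresses; they are excluded from IOVA allocation via the
 * reserved_iova_ranges below.
 */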
  71
  72/*
  73 * This bitmap is used to advertise the page sizes our hardware supports
  74 * to the IOMMU core, which will then use this information to split
  75 * physically contiguous memory regions it is mapping into page sizes
  76 * that we support.
  77 *
  78 * 512GB Pages are not supported due to a hardware bug
  79 */
  80#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
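/*
 * ~0xFFFUL sets every bit from 2^12 upwards, i.e. all power-of-two page
 * sizes of 4KiB and larger; ~(2ULL << 38) clears bit 39 again, which is
 * exactly the broken 512GB page size mentioned above.
 */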
  81
  82static DEFINE_RWLOCK(amd_iommu_devtable_lock);
  83
  84/* List of all available dev_data structures */
  85static LIST_HEAD(dev_data_list);
  86static DEFINE_SPINLOCK(dev_data_list_lock);
  87
  88LIST_HEAD(ioapic_map);
  89LIST_HEAD(hpet_map);
  90LIST_HEAD(acpihid_map);
  91
  92#define FLUSH_QUEUE_SIZE 256
  93
  94struct flush_queue_entry {
  95	unsigned long iova_pfn;
  96	unsigned long pages;
  97	struct dma_ops_domain *dma_dom;
  98};
  99
 100struct flush_queue {
 101	spinlock_t lock;
 102	unsigned next;
 103	struct flush_queue_entry *entries;
 104};
 105
 106static DEFINE_PER_CPU(struct flush_queue, flush_queue);
 107
 108static atomic_t queue_timer_on;
 109static struct timer_list queue_timer;
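/*
 * Deferred IOVA freeing: unmapped ranges are queued per CPU (up to
 * FLUSH_QUEUE_SIZE entries) and only handed back to the IOVA allocator
 * after the IOTLB has been flushed - either when a queue fills up or when
 * queue_timer fires.
 */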
 110
 111/*
 112 * Domain for untranslated devices - only allocated
 113 * if iommu=pt passed on kernel cmd line.
 114 */
 115static const struct iommu_ops amd_iommu_ops;
 116
 117static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 118int amd_iommu_max_glx_val = -1;
 119
 120static struct dma_map_ops amd_iommu_dma_ops;
 121
 122/*
 123 * This struct contains device specific data for the IOMMU
 124 */
 125struct iommu_dev_data {
 126	struct list_head list;		  /* For domain->dev_list */
 127	struct list_head dev_data_list;	  /* For global dev_data_list */
 128	struct protection_domain *domain; /* Domain the device is bound to */
 129	u16 devid;			  /* PCI Device ID */
 130	u16 alias;			  /* Alias Device ID */
 131	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
 132	bool passthrough;		  /* Device is identity mapped */
 133	struct {
 134		bool enabled;
 135		int qdep;
 136	} ats;				  /* ATS state */
 137	bool pri_tlp;			  /* PASID TLB required for
 138					     PPR completions */
 139	u32 errata;			  /* Bitmap for errata to apply */
 140	bool use_vapic;			  /* Enable device to use vapic mode */
 141};
 142
 143/*
 144 * general struct to manage commands send to an IOMMU
 145 */
 146struct iommu_cmd {
 147	u32 data[4];
 148};
 149
 150struct kmem_cache *amd_iommu_irq_cache;
 151
 152static void update_domain(struct protection_domain *domain);
 153static int protection_domain_init(struct protection_domain *domain);
 154static void detach_device(struct device *dev);
 155
 156/*
 157 * Data container for a dma_ops specific protection domain
 158 */
 159struct dma_ops_domain {
 160	/* generic protection domain information */
 161	struct protection_domain domain;
 162
 163	/* IOVA RB-Tree */
 164	struct iova_domain iovad;
 165};
 166
 167static struct iova_domain reserved_iova_ranges;
 168static struct lock_class_key reserved_rbtree_key;
 169
 170/****************************************************************************
 171 *
 172 * Helper functions
 173 *
 174 ****************************************************************************/
 175
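/*
 * Compare the ACPI HID/UID of @dev against @entry.  Returns 0 on a match
 * (strcmp-style), so callers can use it directly while walking acpihid_map.
 */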
 176static inline int match_hid_uid(struct device *dev,
 177				struct acpihid_map_entry *entry)
 178{
 179	const char *hid, *uid;
 180
 181	hid = acpi_device_hid(ACPI_COMPANION(dev));
 182	uid = acpi_device_uid(ACPI_COMPANION(dev));
 183
 184	if (!hid || !(*hid))
 185		return -ENODEV;
 186
 187	if (!uid || !(*uid))
 188		return strcmp(hid, entry->hid);
 189
 190	if (!(*entry->uid))
 191		return strcmp(hid, entry->hid);
 192
 193	return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
 194}
 195
 196static inline u16 get_pci_device_id(struct device *dev)
 197{
 198	struct pci_dev *pdev = to_pci_dev(dev);
 199
 200	return PCI_DEVID(pdev->bus->number, pdev->devfn);
 201}
 202
 203static inline int get_acpihid_device_id(struct device *dev,
 204					struct acpihid_map_entry **entry)
 205{
 206	struct acpihid_map_entry *p;
 207
 208	list_for_each_entry(p, &acpihid_map, list) {
 209		if (!match_hid_uid(dev, p)) {
 210			if (entry)
 211				*entry = p;
 212			return p->devid;
 213		}
 214	}
 215	return -EINVAL;
 216}
 217
 218static inline int get_device_id(struct device *dev)
 219{
 220	int devid;
 221
 222	if (dev_is_pci(dev))
 223		devid = get_pci_device_id(dev);
 224	else
 225		devid = get_acpihid_device_id(dev, NULL);
 226
 227	return devid;
 228}
 229
 230static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 231{
 232	return container_of(dom, struct protection_domain, domain);
 233}
 234
 235static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
 236{
 237	BUG_ON(domain->flags != PD_DMA_OPS_MASK);
 238	return container_of(domain, struct dma_ops_domain, domain);
 239}
 240
 241static struct iommu_dev_data *alloc_dev_data(u16 devid)
 242{
 243	struct iommu_dev_data *dev_data;
 244	unsigned long flags;
 245
 246	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
 247	if (!dev_data)
 248		return NULL;
 249
 250	dev_data->devid = devid;
 251
 252	spin_lock_irqsave(&dev_data_list_lock, flags);
 253	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
 254	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 255
 256	return dev_data;
 257}
 258
 259static struct iommu_dev_data *search_dev_data(u16 devid)
 260{
 261	struct iommu_dev_data *dev_data;
 262	unsigned long flags;
 263
 264	spin_lock_irqsave(&dev_data_list_lock, flags);
 265	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
 266		if (dev_data->devid == devid)
 267			goto out_unlock;
 268	}
 269
 270	dev_data = NULL;
 271
 272out_unlock:
 273	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 274
 275	return dev_data;
 276}
 277
 278static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
 279{
 280	*(u16 *)data = alias;
 281	return 0;
 282}
 283
 284static u16 get_alias(struct device *dev)
 285{
 286	struct pci_dev *pdev = to_pci_dev(dev);
 287	u16 devid, ivrs_alias, pci_alias;
 288
 289	/* The callers make sure that get_device_id() does not fail here */
 290	devid = get_device_id(dev);
 291	ivrs_alias = amd_iommu_alias_table[devid];
 292	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
 293
 294	if (ivrs_alias == pci_alias)
 295		return ivrs_alias;
 296
 297	/*
 298	 * DMA alias showdown
 299	 *
 300	 * The IVRS is fairly reliable in telling us about aliases, but it
 301	 * can't know about every screwy device.  If we don't have an IVRS
 302	 * reported alias, use the PCI reported alias.  In that case we may
 303	 * still need to initialize the rlookup and dev_table entries if the
 304	 * alias is to a non-existent device.
 305	 */
 306	if (ivrs_alias == devid) {
 307		if (!amd_iommu_rlookup_table[pci_alias]) {
 308			amd_iommu_rlookup_table[pci_alias] =
 309				amd_iommu_rlookup_table[devid];
 310			memcpy(amd_iommu_dev_table[pci_alias].data,
 311			       amd_iommu_dev_table[devid].data,
 312			       sizeof(amd_iommu_dev_table[pci_alias].data));
 313		}
 314
 315		return pci_alias;
 316	}
 317
 318	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
 319		"for device %s[%04x:%04x], kernel reported alias "
 320		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
 321		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
 322		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
 323		PCI_FUNC(pci_alias));
 324
 325	/*
 326	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
 327	 * bus, then the IVRS table may know about a quirk that we don't.
 328	 */
 329	if (pci_alias == devid &&
 330	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
 331		pci_add_dma_alias(pdev, ivrs_alias & 0xff);
 
 332		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
 333			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
 334			dev_name(dev));
 335	}
 336
 337	return ivrs_alias;
 338}
 339
 340static struct iommu_dev_data *find_dev_data(u16 devid)
 341{
 342	struct iommu_dev_data *dev_data;
 343
 344	dev_data = search_dev_data(devid);
 345
 346	if (dev_data == NULL)
 347		dev_data = alloc_dev_data(devid);
 348
 349	return dev_data;
 350}
 351
 352static struct iommu_dev_data *get_dev_data(struct device *dev)
 353{
 354	return dev->archdata.iommu;
 355}
 356
 357/*
 358* Find or create an IOMMU group for an acpihid device.
 359*/
 360static struct iommu_group *acpihid_device_group(struct device *dev)
 361{
 362	struct acpihid_map_entry *p, *entry = NULL;
 363	int devid;
 364
 365	devid = get_acpihid_device_id(dev, &entry);
 366	if (devid < 0)
 367		return ERR_PTR(devid);
 368
 369	list_for_each_entry(p, &acpihid_map, list) {
 370		if ((devid == p->devid) && p->group)
 371			entry->group = p->group;
 372	}
 373
 374	if (!entry->group)
 375		entry->group = generic_device_group(dev);
 376	else
 377		iommu_group_ref_get(entry->group);
 378
 379	return entry->group;
 380}
 381
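/*
 * IOMMUv2 (PASID-based) operation requires the device to implement the
 * ATS, PRI and PASID PCIe extended capabilities; if any one of them is
 * missing the device cannot be used in an IOMMUv2 domain.
 */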
 382static bool pci_iommuv2_capable(struct pci_dev *pdev)
 383{
 384	static const int caps[] = {
 385		PCI_EXT_CAP_ID_ATS,
 386		PCI_EXT_CAP_ID_PRI,
 387		PCI_EXT_CAP_ID_PASID,
 388	};
 389	int i, pos;
 390
 391	for (i = 0; i < 3; ++i) {
 392		pos = pci_find_ext_capability(pdev, caps[i]);
 393		if (pos == 0)
 394			return false;
 395	}
 396
 397	return true;
 398}
 399
 400static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
 401{
 402	struct iommu_dev_data *dev_data;
 403
 404	dev_data = get_dev_data(&pdev->dev);
 405
 406	return dev_data->errata & (1 << erratum) ? true : false;
 407}
 408
 409/*
 410 * This function checks if the driver got a valid device from the caller to
 411 * avoid dereferencing invalid pointers.
 412 */
 413static bool check_device(struct device *dev)
 414{
 415	int devid;
 416
 417	if (!dev || !dev->dma_mask)
 418		return false;
 419
 420	devid = get_device_id(dev);
 421	if (devid < 0)
 422		return false;
 423
 424	/* Out of our scope? */
 425	if (devid > amd_iommu_last_bdf)
 426		return false;
 427
 428	if (amd_iommu_rlookup_table[devid] == NULL)
 429		return false;
 430
 431	return true;
 432}
 433
 434static void init_iommu_group(struct device *dev)
 435{
 
 
 436	struct iommu_group *group;
 437
 438	group = iommu_group_get_for_dev(dev);
 439	if (IS_ERR(group))
 440		return;
 441
 442	iommu_group_put(group);
 443}
 444
 445static int iommu_init_device(struct device *dev)
 446{
 
 447	struct iommu_dev_data *dev_data;
 448	int devid;
 449
 450	if (dev->archdata.iommu)
 451		return 0;
 452
 453	devid = get_device_id(dev);
 454	if (devid < 0)
 455		return devid;
 456
 457	dev_data = find_dev_data(devid);
 458	if (!dev_data)
 459		return -ENOMEM;
 460
 461	dev_data->alias = get_alias(dev);
 462
 463	if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
 464		struct amd_iommu *iommu;
 465
 466		iommu = amd_iommu_rlookup_table[dev_data->devid];
 467		dev_data->iommu_v2 = iommu->is_iommu_v2;
 468	}
 469
 470	dev->archdata.iommu = dev_data;
 471
 472	iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
 473			  dev);
 474
 475	return 0;
 476}
 477
 478static void iommu_ignore_device(struct device *dev)
 479{
 480	u16 alias;
 481	int devid;
 482
 483	devid = get_device_id(dev);
 484	if (devid < 0)
 485		return;
 486
 487	alias = get_alias(dev);
 488
 489	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 490	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
 491
 492	amd_iommu_rlookup_table[devid] = NULL;
 493	amd_iommu_rlookup_table[alias] = NULL;
 494}
 495
 496static void iommu_uninit_device(struct device *dev)
 497{
 498	int devid;
 499	struct iommu_dev_data *dev_data;
 500
 501	devid = get_device_id(dev);
 502	if (devid < 0)
 503		return;
 504
 505	dev_data = search_dev_data(devid);
 506	if (!dev_data)
 507		return;
 508
 509	if (dev_data->domain)
 510		detach_device(dev);
 511
 512	iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
 513			    dev);
 514
 515	iommu_group_remove_device(dev);
 516
 517	/* Remove dma-ops */
 518	dev->archdata.dma_ops = NULL;
 519
 520	/*
 521	 * We keep dev_data around for unplugged devices and reuse it when the
 522	 * device is re-plugged - not doing so would introduce a ton of races.
 523	 */
 524}
 525
 526/****************************************************************************
 527 *
 528 * Interrupt handling functions
 529 *
 530 ****************************************************************************/
 531
 532static void dump_dte_entry(u16 devid)
 533{
 534	int i;
 535
 536	for (i = 0; i < 4; ++i)
 537		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
 538			amd_iommu_dev_table[devid].data[i]);
 539}
 540
 541static void dump_command(unsigned long phys_addr)
 542{
 543	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
 544	int i;
 545
 546	for (i = 0; i < 4; ++i)
 547		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
 548}
 549
 550static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 551{
 552	int type, devid, domid, flags;
 553	volatile u32 *event = __evt;
 554	int count = 0;
 555	u64 address;
 556
 557retry:
 558	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
 559	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 560	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
 561	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 562	address = (u64)(((u64)event[3]) << 32) | event[2];
 563
 564	if (type == 0) {
 565		/* Did we hit the erratum? */
 566		if (++count == LOOP_TIMEOUT) {
 567			pr_err("AMD-Vi: No event written to event log\n");
 568			return;
 569		}
 570		udelay(1);
 571		goto retry;
 572	}
 573
 574	printk(KERN_ERR "AMD-Vi: Event logged [");
 575
 576	switch (type) {
 577	case EVENT_TYPE_ILL_DEV:
 578		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
 579		       "address=0x%016llx flags=0x%04x]\n",
 580		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 581		       address, flags);
 582		dump_dte_entry(devid);
 583		break;
 584	case EVENT_TYPE_IO_FAULT:
 585		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
 586		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 587		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 588		       domid, address, flags);
 589		break;
 590	case EVENT_TYPE_DEV_TAB_ERR:
 591		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 592		       "address=0x%016llx flags=0x%04x]\n",
 593		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 594		       address, flags);
 595		break;
 596	case EVENT_TYPE_PAGE_TAB_ERR:
 597		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 598		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 599		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 600		       domid, address, flags);
 601		break;
 602	case EVENT_TYPE_ILL_CMD:
 603		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
 604		dump_command(address);
 605		break;
 606	case EVENT_TYPE_CMD_HARD_ERR:
 607		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
 608		       "flags=0x%04x]\n", address, flags);
 609		break;
 610	case EVENT_TYPE_IOTLB_INV_TO:
 611		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
 612		       "address=0x%016llx]\n",
 613		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 614		       address);
 615		break;
 616	case EVENT_TYPE_INV_DEV_REQ:
 617		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
 618		       "address=0x%016llx flags=0x%04x]\n",
 619		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 620		       address, flags);
 621		break;
 622	default:
 623		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
 624	}
 625
 626	memset(__evt, 0, 4 * sizeof(u32));
 627}
 628
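/*
 * The event log is a ring buffer shared with the hardware: the IOMMU
 * advances the tail pointer as it writes entries, the driver consumes
 * everything between head and tail and writes the new head back to the
 * MMIO register.
 */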
 629static void iommu_poll_events(struct amd_iommu *iommu)
 630{
 631	u32 head, tail;
 632
 633	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 634	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 635
 636	while (head != tail) {
 637		iommu_print_event(iommu, iommu->evt_buf + head);
 638		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
 639	}
 640
 641	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 642}
 643
 644static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 645{
 646	struct amd_iommu_fault fault;
 647
 
 
 648	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 649		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 650		return;
 651	}
 652
 653	fault.address   = raw[1];
 654	fault.pasid     = PPR_PASID(raw[0]);
 655	fault.device_id = PPR_DEVID(raw[0]);
 656	fault.tag       = PPR_TAG(raw[0]);
 657	fault.flags     = PPR_FLAGS(raw[0]);
 658
 659	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 660}
 661
 662static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 663{
 664	u32 head, tail;
 665
 666	if (iommu->ppr_log == NULL)
 667		return;
 668
 669	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 670	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 671
 672	while (head != tail) {
 673		volatile u64 *raw;
 674		u64 entry[2];
 675		int i;
 676
 677		raw = (u64 *)(iommu->ppr_log + head);
 678
 679		/*
 680		 * Hardware bug: Interrupt may arrive before the entry is
 681		 * written to memory. If this happens we need to wait for the
 682		 * entry to arrive.
 683		 */
 684		for (i = 0; i < LOOP_TIMEOUT; ++i) {
 685			if (PPR_REQ_TYPE(raw[0]) != 0)
 686				break;
 687			udelay(1);
 688		}
 689
 690		/* Avoid memcpy function-call overhead */
 691		entry[0] = raw[0];
 692		entry[1] = raw[1];
 693
 694		/*
 695		 * To detect the hardware bug we need to clear the entry
 696		 * back to zero.
 697		 */
 698		raw[0] = raw[1] = 0UL;
 699
 700		/* Update head pointer of hardware ring-buffer */
 701		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 702		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 703
 704		/* Handle PPR entry */
 705		iommu_handle_ppr_entry(iommu, entry);
 706
 707		/* Refresh ring-buffer information */
 708		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 709		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 710	}
 711}
 712
 713#ifdef CONFIG_IRQ_REMAP
 714static int (*iommu_ga_log_notifier)(u32);
 715
 716int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
 717{
 718	iommu_ga_log_notifier = notifier;
 719
 720	return 0;
 721}
 722EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
 723
 724static void iommu_poll_ga_log(struct amd_iommu *iommu)
 725{
 726	u32 head, tail, cnt = 0;
 727
 728	if (iommu->ga_log == NULL)
 729		return;
 730
 731	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
 732	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
 733
 734	while (head != tail) {
 735		volatile u64 *raw;
 736		u64 log_entry;
 737
 738		raw = (u64 *)(iommu->ga_log + head);
 739		cnt++;
 740
 741		/* Avoid memcpy function-call overhead */
 742		log_entry = *raw;
 743
 744		/* Update head pointer of hardware ring-buffer */
 745		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
 746		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
 747
 748		/* Handle GA entry */
 749		switch (GA_REQ_TYPE(log_entry)) {
 750		case GA_GUEST_NR:
 751			if (!iommu_ga_log_notifier)
 752				break;
 753
 754			pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
 755				 __func__, GA_DEVID(log_entry),
 756				 GA_TAG(log_entry));
 757
 758			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
 759				pr_err("AMD-Vi: GA log notifier failed.\n");
 760			break;
 761		default:
 762			break;
 763		}
 764	}
 765}
 766#endif /* CONFIG_IRQ_REMAP */
 767
 768#define AMD_IOMMU_INT_MASK	\
 769	(MMIO_STATUS_EVT_INT_MASK | \
 770	 MMIO_STATUS_PPR_INT_MASK | \
 771	 MMIO_STATUS_GALOG_INT_MASK)
 772
 773irqreturn_t amd_iommu_int_thread(int irq, void *data)
 774{
 775	struct amd_iommu *iommu = (struct amd_iommu *) data;
 776	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 777
 778	while (status & AMD_IOMMU_INT_MASK) {
 779		/* Enable EVT and PPR and GA interrupts again */
 780		writel(AMD_IOMMU_INT_MASK,
 781			iommu->mmio_base + MMIO_STATUS_OFFSET);
 782
 783		if (status & MMIO_STATUS_EVT_INT_MASK) {
 784			pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
 785			iommu_poll_events(iommu);
 786		}
 787
 788		if (status & MMIO_STATUS_PPR_INT_MASK) {
 789			pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
 790			iommu_poll_ppr_log(iommu);
 791		}
 792
 793#ifdef CONFIG_IRQ_REMAP
 794		if (status & MMIO_STATUS_GALOG_INT_MASK) {
 795			pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
 796			iommu_poll_ga_log(iommu);
 797		}
 798#endif
 799
 800		/*
 801		 * Hardware bug: ERBT1312
 802		 * When re-enabling interrupt (by writing 1
 803		 * to clear the bit), the hardware might also try to set
 804		 * the interrupt bit in the event status register.
 805 * In this scenario, the bit will be set, disabling
 806		 * subsequent interrupts.
 807		 *
 808		 * Workaround: The IOMMU driver should read back the
 809		 * status register and check if the interrupt bits are cleared.
 810 * If not, the driver will need to go through the interrupt handler
 811 * again and re-clear the bits.
 812		 */
 813		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 814	}
 815	return IRQ_HANDLED;
 816}
 817
 818irqreturn_t amd_iommu_int_handler(int irq, void *data)
 819{
 820	return IRQ_WAKE_THREAD;
 821}
 822
 823/****************************************************************************
 824 *
 825 * IOMMU command queuing functions
 826 *
 827 ****************************************************************************/
 828
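/*
 * Poll a completion-wait semaphore.  build_completion_wait() makes the
 * IOMMU store a non-zero value to this address once all previously queued
 * commands have completed, so busy-wait until that happens or the
 * LOOP_TIMEOUT limit is hit.
 */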
 829static int wait_on_sem(volatile u64 *sem)
 830{
 831	int i = 0;
 832
 833	while (*sem == 0 && i < LOOP_TIMEOUT) {
 834		udelay(1);
 835		i += 1;
 836	}
 837
 838	if (i == LOOP_TIMEOUT) {
 839		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
 840		return -EIO;
 841	}
 842
 843	return 0;
 844}
 845
 846static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 847			       struct iommu_cmd *cmd,
 848			       u32 tail)
 849{
 850	u8 *target;
 851
 852	target = iommu->cmd_buf + tail;
 853	tail   = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
 854
 855	/* Copy command to buffer */
 856	memcpy(target, cmd, sizeof(*cmd));
 857
 858	/* Tell the IOMMU about it */
 859	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 860}
 861
 862static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 863{
 864	WARN_ON(address & 0x7ULL);
 865
 866	memset(cmd, 0, sizeof(*cmd));
 867	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
 868	cmd->data[1] = upper_32_bits(__pa(address));
 869	cmd->data[2] = 1;
 870	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 871}
 872
 873static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 874{
 875	memset(cmd, 0, sizeof(*cmd));
 876	cmd->data[0] = devid;
 877	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 878}
 879
 880static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 881				  size_t size, u16 domid, int pde)
 882{
 883	u64 pages;
 884	bool s;
 885
 886	pages = iommu_num_pages(address, size, PAGE_SIZE);
 887	s     = false;
 888
 889	if (pages > 1) {
 890		/*
 891		 * If we have to flush more than one page, flush all
 892		 * TLB entries for this domain
 893		 */
 894		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 895		s = true;
 896	}
 897
 898	address &= PAGE_MASK;
 899
 900	memset(cmd, 0, sizeof(*cmd));
 901	cmd->data[1] |= domid;
 902	cmd->data[2]  = lower_32_bits(address);
 903	cmd->data[3]  = upper_32_bits(address);
 904	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 905	if (s) /* size bit - we flush more than one 4kb page */
 906		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 907	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
 908		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 909}
 910
 911static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 912				  u64 address, size_t size)
 913{
 914	u64 pages;
 915	bool s;
 916
 917	pages = iommu_num_pages(address, size, PAGE_SIZE);
 918	s     = false;
 919
 920	if (pages > 1) {
 921		/*
 922		 * If we have to flush more than one page, flush all
 923		 * TLB entries for this domain
 924		 */
 925		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 926		s = true;
 927	}
 928
 929	address &= PAGE_MASK;
 930
 931	memset(cmd, 0, sizeof(*cmd));
 932	cmd->data[0]  = devid;
 933	cmd->data[0] |= (qdep & 0xff) << 24;
 934	cmd->data[1]  = devid;
 935	cmd->data[2]  = lower_32_bits(address);
 936	cmd->data[3]  = upper_32_bits(address);
 937	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 938	if (s)
 939		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 940}
 941
 942static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
 943				  u64 address, bool size)
 944{
 945	memset(cmd, 0, sizeof(*cmd));
 946
 947	address &= ~(0xfffULL);
 948
 949	cmd->data[0]  = pasid;
 950	cmd->data[1]  = domid;
 951	cmd->data[2]  = lower_32_bits(address);
 952	cmd->data[3]  = upper_32_bits(address);
 953	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 954	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 955	if (size)
 956		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 957	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 958}
 959
 960static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
 961				  int qdep, u64 address, bool size)
 962{
 963	memset(cmd, 0, sizeof(*cmd));
 964
 965	address &= ~(0xfffULL);
 966
 967	cmd->data[0]  = devid;
 968	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
 969	cmd->data[0] |= (qdep  & 0xff) << 24;
 970	cmd->data[1]  = devid;
 971	cmd->data[1] |= (pasid & 0xff) << 16;
 972	cmd->data[2]  = lower_32_bits(address);
 973	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 974	cmd->data[3]  = upper_32_bits(address);
 975	if (size)
 976		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 977	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 978}
 979
 980static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
 981			       int status, int tag, bool gn)
 982{
 983	memset(cmd, 0, sizeof(*cmd));
 984
 985	cmd->data[0]  = devid;
 986	if (gn) {
 987		cmd->data[1]  = pasid;
 988		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
 989	}
 990	cmd->data[3]  = tag & 0x1ff;
 991	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
 992
 993	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
 994}
 995
 996static void build_inv_all(struct iommu_cmd *cmd)
 997{
 998	memset(cmd, 0, sizeof(*cmd));
 999	CMD_SET_TYPE(cmd, CMD_INV_ALL);
1000}
1001
1002static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1003{
1004	memset(cmd, 0, sizeof(*cmd));
1005	cmd->data[0] = devid;
1006	CMD_SET_TYPE(cmd, CMD_INV_IRT);
1007}
1008
1009/*
1010 * Writes the command to the IOMMU's command buffer and informs the
1011 * hardware about the new command.
1012 */
1013static int __iommu_queue_command_sync(struct amd_iommu *iommu,
1014				      struct iommu_cmd *cmd,
1015				      bool sync)
1016{
1017	u32 left, tail, head, next_tail;
 
1018
1019again:
 
1020
1021	head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
1022	tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
1023	next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1024	left      = (head - next_tail) % CMD_BUFFER_SIZE;
1025
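	/*
	 * Almost out of ring space (at most two command slots left):
	 * queue a completion-wait directly, spin until the IOMMU has
	 * drained the buffer and then retry.
	 */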
1026	if (left <= 0x20) {
1027		struct iommu_cmd sync_cmd;
 
1028		int ret;
1029
1030		iommu->cmd_sem = 0;
 
1031
1032		build_completion_wait(&sync_cmd, (u64)&iommu->cmd_sem);
1033		copy_cmd_to_buffer(iommu, &sync_cmd, tail);
1034
1035		if ((ret = wait_on_sem(&iommu->cmd_sem)) != 0)
1036			return ret;
1037
1038		goto again;
1039	}
1040
1041	copy_cmd_to_buffer(iommu, cmd, tail);
1042
1043	/* We need to sync now to make sure all commands are processed */
1044	iommu->need_sync = sync;
1045
1046	return 0;
1047}
1048
1049static int iommu_queue_command_sync(struct amd_iommu *iommu,
1050				    struct iommu_cmd *cmd,
1051				    bool sync)
1052{
1053	unsigned long flags;
1054	int ret;
1055
1056	spin_lock_irqsave(&iommu->lock, flags);
1057	ret = __iommu_queue_command_sync(iommu, cmd, sync);
1058	spin_unlock_irqrestore(&iommu->lock, flags);
1059
1060	return ret;
1061}
1062
1063static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1064{
1065	return iommu_queue_command_sync(iommu, cmd, true);
1066}
1067
1068/*
1069 * This function queues a completion wait command into the command
1070 * buffer of an IOMMU
1071 */
1072static int iommu_completion_wait(struct amd_iommu *iommu)
1073{
1074	struct iommu_cmd cmd;
1075	unsigned long flags;
1076	int ret;
1077
1078	if (!iommu->need_sync)
1079		return 0;
1080
 
1081
1082	build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
1083
1084	spin_lock_irqsave(&iommu->lock, flags);
1085
1086	iommu->cmd_sem = 0;
1087
1088	ret = __iommu_queue_command_sync(iommu, &cmd, false);
1089	if (ret)
1090		goto out_unlock;
1091
1092	ret = wait_on_sem(&iommu->cmd_sem);
1093
1094out_unlock:
1095	spin_unlock_irqrestore(&iommu->lock, flags);
1096
1097	return ret;
1098}
1099
1100static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1101{
1102	struct iommu_cmd cmd;
1103
1104	build_inv_dte(&cmd, devid);
1105
1106	return iommu_queue_command(iommu, &cmd);
1107}
1108
1109static void iommu_flush_dte_all(struct amd_iommu *iommu)
1110{
1111	u32 devid;
1112
1113	for (devid = 0; devid <= 0xffff; ++devid)
1114		iommu_flush_dte(iommu, devid);
1115
1116	iommu_completion_wait(iommu);
1117}
1118
1119/*
1120 * This function uses heavy locking and may disable irqs for some time. But
1121 * this is no issue because it is only called during resume.
1122 */
1123static void iommu_flush_tlb_all(struct amd_iommu *iommu)
1124{
1125	u32 dom_id;
1126
1127	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
1128		struct iommu_cmd cmd;
1129		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1130				      dom_id, 1);
1131		iommu_queue_command(iommu, &cmd);
1132	}
1133
1134	iommu_completion_wait(iommu);
1135}
1136
1137static void iommu_flush_all(struct amd_iommu *iommu)
1138{
1139	struct iommu_cmd cmd;
1140
1141	build_inv_all(&cmd);
1142
1143	iommu_queue_command(iommu, &cmd);
1144	iommu_completion_wait(iommu);
1145}
1146
1147static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1148{
1149	struct iommu_cmd cmd;
1150
1151	build_inv_irt(&cmd, devid);
1152
1153	iommu_queue_command(iommu, &cmd);
1154}
1155
1156static void iommu_flush_irt_all(struct amd_iommu *iommu)
1157{
1158	u32 devid;
1159
1160	for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
1161		iommu_flush_irt(iommu, devid);
1162
1163	iommu_completion_wait(iommu);
1164}
1165
1166void iommu_flush_all_caches(struct amd_iommu *iommu)
1167{
1168	if (iommu_feature(iommu, FEATURE_IA)) {
1169		iommu_flush_all(iommu);
1170	} else {
1171		iommu_flush_dte_all(iommu);
1172		iommu_flush_irt_all(iommu);
1173		iommu_flush_tlb_all(iommu);
1174	}
1175}
1176
1177/*
1178 * Command send function for flushing on-device TLB
1179 */
1180static int device_flush_iotlb(struct iommu_dev_data *dev_data,
1181			      u64 address, size_t size)
1182{
1183	struct amd_iommu *iommu;
1184	struct iommu_cmd cmd;
1185	int qdep;
1186
1187	qdep     = dev_data->ats.qdep;
1188	iommu    = amd_iommu_rlookup_table[dev_data->devid];
1189
1190	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1191
1192	return iommu_queue_command(iommu, &cmd);
1193}
1194
1195/*
1196 * Command send function for invalidating a device table entry
1197 */
1198static int device_flush_dte(struct iommu_dev_data *dev_data)
1199{
1200	struct amd_iommu *iommu;
1201	u16 alias;
1202	int ret;
1203
1204	iommu = amd_iommu_rlookup_table[dev_data->devid];
1205	alias = dev_data->alias;
1206
1207	ret = iommu_flush_dte(iommu, dev_data->devid);
1208	if (!ret && alias != dev_data->devid)
1209		ret = iommu_flush_dte(iommu, alias);
1210	if (ret)
1211		return ret;
1212
1213	if (dev_data->ats.enabled)
1214		ret = device_flush_iotlb(dev_data, 0, ~0UL);
1215
1216	return ret;
1217}
1218
1219/*
1220 * TLB invalidation function which is called from the mapping functions.
1221 * It invalidates a single PTE if the range to flush is within a single
1222 * page. Otherwise it flushes the whole TLB of the IOMMU.
1223 */
1224static void __domain_flush_pages(struct protection_domain *domain,
1225				 u64 address, size_t size, int pde)
1226{
1227	struct iommu_dev_data *dev_data;
1228	struct iommu_cmd cmd;
1229	int ret = 0, i;
1230
1231	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1232
1233	for (i = 0; i < amd_iommus_present; ++i) {
1234		if (!domain->dev_iommu[i])
1235			continue;
1236
1237		/*
1238		 * Devices of this domain are behind this IOMMU
1239		 * We need a TLB flush
1240		 */
1241		ret |= iommu_queue_command(amd_iommus[i], &cmd);
1242	}
1243
1244	list_for_each_entry(dev_data, &domain->dev_list, list) {
1245
1246		if (!dev_data->ats.enabled)
1247			continue;
1248
1249		ret |= device_flush_iotlb(dev_data, address, size);
1250	}
1251
1252	WARN_ON(ret);
1253}
1254
1255static void domain_flush_pages(struct protection_domain *domain,
1256			       u64 address, size_t size)
1257{
1258	__domain_flush_pages(domain, address, size, 0);
1259}
1260
1261/* Flush the whole IO/TLB for a given protection domain */
1262static void domain_flush_tlb(struct protection_domain *domain)
1263{
1264	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
1265}
1266
1267/* Flush the whole IO/TLB for a given protection domain - including PDE */
1268static void domain_flush_tlb_pde(struct protection_domain *domain)
1269{
1270	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1271}
1272
1273static void domain_flush_complete(struct protection_domain *domain)
1274{
1275	int i;
1276
1277	for (i = 0; i < amd_iommus_present; ++i) {
1278		if (domain && !domain->dev_iommu[i])
1279			continue;
1280
1281		/*
1282		 * Devices of this domain are behind this IOMMU
1283		 * We need to wait for completion of all commands.
1284		 */
1285		iommu_completion_wait(amd_iommus[i]);
1286	}
1287}
1288
1289
1290/*
1291 * This function flushes the DTEs for all devices in domain
1292 */
1293static void domain_flush_devices(struct protection_domain *domain)
1294{
1295	struct iommu_dev_data *dev_data;
1296
1297	list_for_each_entry(dev_data, &domain->dev_list, list)
1298		device_flush_dte(dev_data);
1299}
1300
1301/****************************************************************************
1302 *
1303 * The functions below are used to create the page table mappings for
1304 * unity mapped regions.
1305 *
1306 ****************************************************************************/
1307
1308/*
1309 * This function is used to add another level to an IO page table. Adding
1310 * another level widens the address space by 9 bits, up to a maximum
1311 * of 64 bits.
1312 */
1313static bool increase_address_space(struct protection_domain *domain,
1314				   gfp_t gfp)
1315{
1316	u64 *pte;
1317
1318	if (domain->mode == PAGE_MODE_6_LEVEL)
1319		/* address space already 64 bit large */
1320		return false;
1321
1322	pte = (void *)get_zeroed_page(gfp);
1323	if (!pte)
1324		return false;
1325
1326	*pte             = PM_LEVEL_PDE(domain->mode,
1327					virt_to_phys(domain->pt_root));
1328	domain->pt_root  = pte;
1329	domain->mode    += 1;
1330	domain->updated  = true;
1331
1332	return true;
1333}
1334
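/*
 * Walk the page table of @domain down to the level that maps pages of
 * @page_size, allocating missing intermediate page-table pages on the way.
 * Returns a pointer to the PTE for @address, or NULL if allocation fails
 * or an unexpected mapping level is found.
 */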
1335static u64 *alloc_pte(struct protection_domain *domain,
1336		      unsigned long address,
1337		      unsigned long page_size,
1338		      u64 **pte_page,
1339		      gfp_t gfp)
1340{
1341	int level, end_lvl;
1342	u64 *pte, *page;
1343
1344	BUG_ON(!is_power_of_2(page_size));
1345
1346	while (address > PM_LEVEL_SIZE(domain->mode))
1347		increase_address_space(domain, gfp);
1348
1349	level   = domain->mode - 1;
1350	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1351	address = PAGE_SIZE_ALIGN(address, page_size);
1352	end_lvl = PAGE_SIZE_LEVEL(page_size);
1353
1354	while (level > end_lvl) {
1355		u64 __pte, __npte;
1356
1357		__pte = *pte;
1358
1359		if (!IOMMU_PTE_PRESENT(__pte)) {
1360			page = (u64 *)get_zeroed_page(gfp);
1361			if (!page)
1362				return NULL;
1363
1364			__npte = PM_LEVEL_PDE(level, virt_to_phys(page));
1365
1366			/* pte could have been changed somewhere. */
1367			if (cmpxchg64(pte, __pte, __npte) != __pte) {
1368				free_page((unsigned long)page);
1369				continue;
1370			}
1371		}
1372
1373		/* No level skipping support yet */
1374		if (PM_PTE_LEVEL(*pte) != level)
1375			return NULL;
1376
1377		level -= 1;
1378
1379		pte = IOMMU_PTE_PAGE(*pte);
1380
1381		if (pte_page && level == end_lvl)
1382			*pte_page = pte;
1383
1384		pte = &pte[PM_LEVEL_INDEX(level, address)];
1385	}
1386
1387	return pte;
1388}
1389
1390/*
1391 * This function checks if there is a PTE for a given dma address. If
1392 * there is one, it returns the pointer to it.
1393 */
1394static u64 *fetch_pte(struct protection_domain *domain,
1395		      unsigned long address,
1396		      unsigned long *page_size)
1397{
1398	int level;
1399	u64 *pte;
1400
1401	if (address > PM_LEVEL_SIZE(domain->mode))
1402		return NULL;
1403
1404	level	   =  domain->mode - 1;
1405	pte	   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1406	*page_size =  PTE_LEVEL_PAGE_SIZE(level);
1407
1408	while (level > 0) {
1409
1410		/* Not Present */
1411		if (!IOMMU_PTE_PRESENT(*pte))
1412			return NULL;
1413
1414		/* Large PTE */
1415		if (PM_PTE_LEVEL(*pte) == 7 ||
1416		    PM_PTE_LEVEL(*pte) == 0)
1417			break;
1418
1419		/* No level skipping support yet */
1420		if (PM_PTE_LEVEL(*pte) != level)
1421			return NULL;
1422
1423		level -= 1;
1424
1425		/* Walk to the next level */
1426		pte	   = IOMMU_PTE_PAGE(*pte);
1427		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
1428		*page_size = PTE_LEVEL_PAGE_SIZE(level);
1429	}
1430
1431	if (PM_PTE_LEVEL(*pte) == 0x07) {
1432		unsigned long pte_mask;
1433
1434		/*
1435		 * If we have a series of large PTEs, make
1436		 * sure to return a pointer to the first one.
1437		 */
1438		*page_size = pte_mask = PTE_PAGE_SIZE(*pte);
1439		pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
1440		pte        = (u64 *)(((unsigned long)pte) & pte_mask);
1441	}
1442
1443	return pte;
1444}
1445
1446/*
1447 * Generic mapping function. It maps a physical address into a DMA
1448 * address space. It allocates the page table pages if necessary.
1449 * In the future it can be extended to a generic mapping function
1450 * supporting all features of AMD IOMMU page tables like level skipping
1451 * and full 64 bit address spaces.
1452 */
1453static int iommu_map_page(struct protection_domain *dom,
1454			  unsigned long bus_addr,
1455			  unsigned long phys_addr,
1456			  unsigned long page_size,
1457			  int prot,
1458			  gfp_t gfp)
1459{
1460	u64 __pte, *pte;
1461	int i, count;
1462
1463	BUG_ON(!IS_ALIGNED(bus_addr, page_size));
1464	BUG_ON(!IS_ALIGNED(phys_addr, page_size));
1465
1466	if (!(prot & IOMMU_PROT_MASK))
1467		return -EINVAL;
1468
1469	count = PAGE_SIZE_PTE_COUNT(page_size);
1470	pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp);
1471
1472	if (!pte)
1473		return -ENOMEM;
1474
1475	for (i = 0; i < count; ++i)
1476		if (IOMMU_PTE_PRESENT(pte[i]))
1477			return -EBUSY;
1478
1479	if (count > 1) {
1480		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
1481		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
1482	} else
1483		__pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
1484
1485	if (prot & IOMMU_PROT_IR)
1486		__pte |= IOMMU_PTE_IR;
1487	if (prot & IOMMU_PROT_IW)
1488		__pte |= IOMMU_PTE_IW;
1489
1490	for (i = 0; i < count; ++i)
1491		pte[i] = __pte;
1492
1493	update_domain(dom);
1494
1495	return 0;
1496}
1497
1498static unsigned long iommu_unmap_page(struct protection_domain *dom,
1499				      unsigned long bus_addr,
1500				      unsigned long page_size)
1501{
1502	unsigned long long unmapped;
1503	unsigned long unmap_size;
1504	u64 *pte;
1505
1506	BUG_ON(!is_power_of_2(page_size));
1507
1508	unmapped = 0;
1509
1510	while (unmapped < page_size) {
1511
1512		pte = fetch_pte(dom, bus_addr, &unmap_size);
1513
1514		if (pte) {
1515			int i, count;
1516
1517			count = PAGE_SIZE_PTE_COUNT(unmap_size);
1518			for (i = 0; i < count; i++)
1519				pte[i] = 0ULL;
1520		}
1521
1522		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
1523		unmapped += unmap_size;
1524	}
1525
1526	BUG_ON(unmapped && !is_power_of_2(unmapped));
1527
1528	return unmapped;
1529}
1530
1531/****************************************************************************
1532 *
1533 * The next functions belong to the address allocator for the dma_ops
1534 * interface functions.
1535 *
1536 ****************************************************************************/
1537
1538
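/*
 * Allocate a (power-of-two rounded) range of @pages IO virtual pages for
 * @dev.  Devices with a DMA mask wider than 32 bits first try the space
 * below 4GB; only if that fails is the allocation retried against the
 * full mask.  Returns the DMA address, or 0 on failure.
 */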
1539static unsigned long dma_ops_alloc_iova(struct device *dev,
1540					struct dma_ops_domain *dma_dom,
1541					unsigned int pages, u64 dma_mask)
1542{
1543	unsigned long pfn = 0;
1544
1545	pages = __roundup_pow_of_two(pages);
 
 
1546
1547	if (dma_mask > DMA_BIT_MASK(32))
1548		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
1549				      IOVA_PFN(DMA_BIT_MASK(32)));
1550
1551	if (!pfn)
1552		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
1553
1554	return (pfn << PAGE_SHIFT);
1555}
1556
1557static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
1558			      unsigned long address,
1559			      unsigned int pages)
1560{
1561	pages = __roundup_pow_of_two(pages);
1562	address >>= PAGE_SHIFT;
1563
1564	free_iova_fast(&dma_dom->iovad, address, pages);
1565}
1566
1567/****************************************************************************
1568 *
1569 * The next functions belong to the domain allocation. A domain is
1570 * allocated for every IOMMU as the default domain. If device isolation
1571 * is enabled, every device gets its own domain. The most important thing
1572 * about domains is the page table mapping the DMA address space they
1573 * contain.
1574 *
1575 ****************************************************************************/
1576
1577/*
1578 * This function adds a protection domain to the global protection domain list
1579 */
1580static void add_domain_to_list(struct protection_domain *domain)
1581{
1582	unsigned long flags;
1583
1584	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1585	list_add(&domain->list, &amd_iommu_pd_list);
1586	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1587}
1588
1589/*
1590 * This function removes a protection domain from the global
1591 * protection domain list
1592 */
1593static void del_domain_from_list(struct protection_domain *domain)
1594{
1595	unsigned long flags;
1596
1597	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1598	list_del(&domain->list);
1599	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1600}
1601
1602static u16 domain_id_alloc(void)
1603{
1604	unsigned long flags;
1605	int id;
1606
1607	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1608	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1609	BUG_ON(id == 0);
1610	if (id > 0 && id < MAX_DOMAIN_ID)
1611		__set_bit(id, amd_iommu_pd_alloc_bitmap);
1612	else
1613		id = 0;
1614	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1615
1616	return id;
1617}
1618
1619static void domain_id_free(int id)
1620{
1621	unsigned long flags;
1622
1623	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1624	if (id > 0 && id < MAX_DOMAIN_ID)
1625		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
1626	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1627}
1628
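/*
 * Generate free_pt_l2() ... free_pt_l6().  Each walks one page-table
 * level and frees the level below it via FN; large PTEs (level 7) and
 * last-level PTEs (level 0) are skipped because they do not reference
 * further page-table pages.
 */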
1629#define DEFINE_FREE_PT_FN(LVL, FN)				\
1630static void free_pt_##LVL (unsigned long __pt)			\
1631{								\
1632	unsigned long p;					\
1633	u64 *pt;						\
1634	int i;							\
1635								\
1636	pt = (u64 *)__pt;					\
1637								\
1638	for (i = 0; i < 512; ++i) {				\
1639		/* PTE present? */				\
1640		if (!IOMMU_PTE_PRESENT(pt[i]))			\
1641			continue;				\
1642								\
1643		/* Large PTE? */				\
1644		if (PM_PTE_LEVEL(pt[i]) == 0 ||			\
1645		    PM_PTE_LEVEL(pt[i]) == 7)			\
1646			continue;				\
1647								\
1648		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
1649		FN(p);						\
1650	}							\
1651	free_page((unsigned long)pt);				\
1652}
1653
1654DEFINE_FREE_PT_FN(l2, free_page)
1655DEFINE_FREE_PT_FN(l3, free_pt_l2)
1656DEFINE_FREE_PT_FN(l4, free_pt_l3)
1657DEFINE_FREE_PT_FN(l5, free_pt_l4)
1658DEFINE_FREE_PT_FN(l6, free_pt_l5)
1659
1660static void free_pagetable(struct protection_domain *domain)
1661{
1662	unsigned long root = (unsigned long)domain->pt_root;
1663
1664	switch (domain->mode) {
1665	case PAGE_MODE_NONE:
1666		break;
1667	case PAGE_MODE_1_LEVEL:
1668		free_page(root);
1669		break;
1670	case PAGE_MODE_2_LEVEL:
1671		free_pt_l2(root);
1672		break;
1673	case PAGE_MODE_3_LEVEL:
1674		free_pt_l3(root);
1675		break;
1676	case PAGE_MODE_4_LEVEL:
1677		free_pt_l4(root);
1678		break;
1679	case PAGE_MODE_5_LEVEL:
1680		free_pt_l5(root);
1681		break;
1682	case PAGE_MODE_6_LEVEL:
1683		free_pt_l6(root);
1684		break;
1685	default:
1686		BUG();
1687	}
1688}
1689
1690static void free_gcr3_tbl_level1(u64 *tbl)
1691{
1692	u64 *ptr;
1693	int i;
1694
1695	for (i = 0; i < 512; ++i) {
1696		if (!(tbl[i] & GCR3_VALID))
1697			continue;
1698
1699		ptr = __va(tbl[i] & PAGE_MASK);
1700
1701		free_page((unsigned long)ptr);
1702	}
1703}
1704
1705static void free_gcr3_tbl_level2(u64 *tbl)
1706{
1707	u64 *ptr;
1708	int i;
1709
1710	for (i = 0; i < 512; ++i) {
1711		if (!(tbl[i] & GCR3_VALID))
1712			continue;
1713
1714		ptr = __va(tbl[i] & PAGE_MASK);
1715
1716		free_gcr3_tbl_level1(ptr);
1717	}
1718}
1719
1720static void free_gcr3_table(struct protection_domain *domain)
1721{
1722	if (domain->glx == 2)
1723		free_gcr3_tbl_level2(domain->gcr3_tbl);
1724	else if (domain->glx == 1)
1725		free_gcr3_tbl_level1(domain->gcr3_tbl);
1726	else
1727		BUG_ON(domain->glx != 0);
1728
1729	free_page((unsigned long)domain->gcr3_tbl);
1730}
1731
1732/*
1733 * Free a domain, only used if something went wrong in the
1734 * allocation path and we need to free an already allocated page table
1735 */
1736static void dma_ops_domain_free(struct dma_ops_domain *dom)
1737{
 
 
1738	if (!dom)
1739		return;
1740
 
 
1741	del_domain_from_list(&dom->domain);
1742
1743	put_iova_domain(&dom->iovad);
1744
1745	free_pagetable(&dom->domain);
1746
1747	if (dom->domain.id)
1748		domain_id_free(dom->domain.id);
1749
1750	kfree(dom);
1751}
1752
1753/*
1754 * Allocates a new protection domain usable for the dma_ops functions.
1755 * It also initializes the page table and the address allocator data
1756 * structures required for the dma_ops interface
1757 */
1758static struct dma_ops_domain *dma_ops_domain_alloc(void)
1759{
1760	struct dma_ops_domain *dma_dom;
 
1761
1762	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
1763	if (!dma_dom)
1764		return NULL;
1765
1766	if (protection_domain_init(&dma_dom->domain))
1767		goto free_dma_dom;
1768
1769	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
1770	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1771	dma_dom->domain.flags = PD_DMA_OPS_MASK;
 
1772	if (!dma_dom->domain.pt_root)
1773		goto free_dma_dom;
1774
1775	init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
1776			 IOVA_START_PFN, DMA_32BIT_PFN);
 
 
1777
1778	/* Initialize reserved ranges */
1779	copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
1780
1781	add_domain_to_list(&dma_dom->domain);
 
1782
1783	return dma_dom;
1784
1785free_dma_dom:
1786	dma_ops_domain_free(dma_dom);
1787
1788	return NULL;
1789}
1790
1791/*
1792 * little helper function to check whether a given protection domain is a
1793 * dma_ops domain
1794 */
1795static bool dma_ops_domain(struct protection_domain *domain)
1796{
1797	return domain->flags & PD_DMA_OPS_MASK;
1798}
1799
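/*
 * Write the device table entry for @devid: the host page-table root and
 * paging mode, the IOTLB enable bit when ATS is in use and, for IOMMUv2
 * domains, the GCR3 table pointer and level split across several DTE
 * fields.
 */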
1800static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
1801{
1802	u64 pte_root = 0;
1803	u64 flags = 0;
1804
1805	if (domain->mode != PAGE_MODE_NONE)
1806		pte_root = virt_to_phys(domain->pt_root);
1807
1808	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1809		    << DEV_ENTRY_MODE_SHIFT;
1810	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
1811
1812	flags = amd_iommu_dev_table[devid].data[1];
1813
1814	if (ats)
1815		flags |= DTE_FLAG_IOTLB;
1816
1817	if (domain->flags & PD_IOMMUV2_MASK) {
1818		u64 gcr3 = __pa(domain->gcr3_tbl);
1819		u64 glx  = domain->glx;
1820		u64 tmp;
1821
1822		pte_root |= DTE_FLAG_GV;
1823		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1824
1825		/* First mask out possible old values for GCR3 table */
1826		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1827		flags    &= ~tmp;
1828
1829		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1830		flags    &= ~tmp;
1831
1832		/* Encode GCR3 table into DTE */
1833		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1834		pte_root |= tmp;
1835
1836		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1837		flags    |= tmp;
1838
1839		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1840		flags    |= tmp;
1841	}
1842
1843	flags &= ~(0xffffUL);
1844	flags |= domain->id;
1845
1846	amd_iommu_dev_table[devid].data[1]  = flags;
1847	amd_iommu_dev_table[devid].data[0]  = pte_root;
1848}
1849
1850static void clear_dte_entry(u16 devid)
1851{
1852	/* remove entry from the device table seen by the hardware */
1853	amd_iommu_dev_table[devid].data[0]  = IOMMU_PTE_P | IOMMU_PTE_TV;
1854	amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
1855
1856	amd_iommu_apply_erratum_63(devid);
1857}
1858
1859static void do_attach(struct iommu_dev_data *dev_data,
1860		      struct protection_domain *domain)
1861{
1862	struct amd_iommu *iommu;
1863	u16 alias;
1864	bool ats;
1865
1866	iommu = amd_iommu_rlookup_table[dev_data->devid];
1867	alias = dev_data->alias;
1868	ats   = dev_data->ats.enabled;
1869
1870	/* Update data structures */
1871	dev_data->domain = domain;
1872	list_add(&dev_data->list, &domain->dev_list);
1873
1874	/* Do reference counting */
1875	domain->dev_iommu[iommu->index] += 1;
1876	domain->dev_cnt                 += 1;
1877
1878	/* Update device table */
1879	set_dte_entry(dev_data->devid, domain, ats);
1880	if (alias != dev_data->devid)
1881		set_dte_entry(alias, domain, ats);
1882
1883	device_flush_dte(dev_data);
1884}
1885
1886static void do_detach(struct iommu_dev_data *dev_data)
1887{
1888	struct amd_iommu *iommu;
1889	u16 alias;
1890
1891	/*
1892	 * First check if the device is still attached. It might already
1893	 * be detached from its domain because the generic
1894	 * iommu_detach_group code detached it and we try again here in
1895	 * our alias handling.
1896	 */
1897	if (!dev_data->domain)
1898		return;
1899
1900	iommu = amd_iommu_rlookup_table[dev_data->devid];
1901	alias = dev_data->alias;
1902
1903	/* decrease reference counters */
1904	dev_data->domain->dev_iommu[iommu->index] -= 1;
1905	dev_data->domain->dev_cnt                 -= 1;
1906
1907	/* Update data structures */
1908	dev_data->domain = NULL;
1909	list_del(&dev_data->list);
1910	clear_dte_entry(dev_data->devid);
1911	if (alias != dev_data->devid)
1912		clear_dte_entry(alias);
1913
1914	/* Flush the DTE entry */
1915	device_flush_dte(dev_data);
1916}
1917
1918/*
1919 * If a device is not yet associated with a domain, this function
1920 * attaches it to the domain and makes this visible to the hardware
1921 */
1922static int __attach_device(struct iommu_dev_data *dev_data,
1923			   struct protection_domain *domain)
1924{
1925	int ret;
1926
1927	/*
1928	 * Must be called with IRQs disabled. Warn here to detect early
1929	 * when it's not.
1930	 */
1931	WARN_ON(!irqs_disabled());
1932
1933	/* lock domain */
1934	spin_lock(&domain->lock);
1935
1936	ret = -EBUSY;
1937	if (dev_data->domain != NULL)
1938		goto out_unlock;
1939
1940	/* Attach alias group root */
1941	do_attach(dev_data, domain);
1942
1943	ret = 0;
1944
1945out_unlock:
1946
1947	/* ready */
1948	spin_unlock(&domain->lock);
1949
1950	return ret;
1951}
1952
1953
1954static void pdev_iommuv2_disable(struct pci_dev *pdev)
1955{
1956	pci_disable_ats(pdev);
1957	pci_disable_pri(pdev);
1958	pci_disable_pasid(pdev);
1959}
1960
1961/* FIXME: Change generic reset-function to do the same */
1962static int pri_reset_while_enabled(struct pci_dev *pdev)
1963{
1964	u16 control;
1965	int pos;
1966
1967	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
1968	if (!pos)
1969		return -EINVAL;
1970
1971	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
1972	control |= PCI_PRI_CTRL_RESET;
1973	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
1974
1975	return 0;
1976}
1977
1978static int pdev_iommuv2_enable(struct pci_dev *pdev)
1979{
1980	bool reset_enable;
1981	int reqs, ret;
1982
1983	/* FIXME: Hardcode number of outstanding requests for now */
1984	reqs = 32;
1985	if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
1986		reqs = 1;
1987	reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
1988
1989	/* Only allow access to user-accessible pages */
1990	ret = pci_enable_pasid(pdev, 0);
1991	if (ret)
1992		goto out_err;
1993
1994	/* First reset the PRI state of the device */
1995	ret = pci_reset_pri(pdev);
1996	if (ret)
1997		goto out_err;
1998
1999	/* Enable PRI */
2000	ret = pci_enable_pri(pdev, reqs);
2001	if (ret)
2002		goto out_err;
2003
2004	if (reset_enable) {
2005		ret = pri_reset_while_enabled(pdev);
2006		if (ret)
2007			goto out_err;
2008	}
2009
2010	ret = pci_enable_ats(pdev, PAGE_SHIFT);
2011	if (ret)
2012		goto out_err;
2013
2014	return 0;
2015
2016out_err:
2017	pci_disable_pri(pdev);
2018	pci_disable_pasid(pdev);
2019
2020	return ret;
2021}
2022
2023/* FIXME: Move this to PCI code */
2024#define PCI_PRI_TLP_OFF		(1 << 15)
2025
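/*
 * Check the PRI status register to learn whether the device expects the
 * PASID TLP prefix on PRG responses. The result is cached in
 * dev_data->pri_tlp and used when building COMPLETE_PPR commands.
 */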
2026static bool pci_pri_tlp_required(struct pci_dev *pdev)
2027{
2028	u16 status;
2029	int pos;
2030
2031	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2032	if (!pos)
2033		return false;
2034
2035	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
2036
2037	return (status & PCI_PRI_TLP_OFF) ? true : false;
2038}
2039
2040/*
2041 * If a device is not yet associated with a domain, this function
2042 * attaches it to the domain and makes this visible to the hardware
2043 */
2044static int attach_device(struct device *dev,
2045			 struct protection_domain *domain)
2046{
2047	struct pci_dev *pdev;
2048	struct iommu_dev_data *dev_data;
2049	unsigned long flags;
2050	int ret;
2051
2052	dev_data = get_dev_data(dev);
2053
2054	if (!dev_is_pci(dev))
2055		goto skip_ats_check;
2056
2057	pdev = to_pci_dev(dev);
2058	if (domain->flags & PD_IOMMUV2_MASK) {
2059		if (!dev_data->passthrough)
2060			return -EINVAL;
2061
2062		if (dev_data->iommu_v2) {
2063			if (pdev_iommuv2_enable(pdev) != 0)
2064				return -EINVAL;
2065
2066			dev_data->ats.enabled = true;
2067			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2068			dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
2069		}
2070	} else if (amd_iommu_iotlb_sup &&
2071		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2072		dev_data->ats.enabled = true;
2073		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2074	}
2075
2076skip_ats_check:
2077	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2078	ret = __attach_device(dev_data, domain);
2079	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2080
2081	/*
2082	 * We might boot into a crash-kernel here. The crashed kernel
2083	 * left the caches in the IOMMU dirty. So we have to flush
2084	 * here to evict all dirty stuff.
2085	 */
2086	domain_flush_tlb_pde(domain);
2087
2088	return ret;
2089}
2090
2091/*
2092 * Removes a device from a protection domain (unlocked)
2093 */
2094static void __detach_device(struct iommu_dev_data *dev_data)
2095{
2096	struct protection_domain *domain;
2097
2098	/*
2099	 * Must be called with IRQs disabled. Warn here to detect early
2100	 * when it's not.
2101	 */
2102	WARN_ON(!irqs_disabled());
2103
2104	if (WARN_ON(!dev_data->domain))
2105		return;
2106
2107	domain = dev_data->domain;
2108
2109	spin_lock(&domain->lock);
2110
2111	do_detach(dev_data);
2112
2113	spin_unlock(&domain->lock);
2114}
2115
2116/*
2117 * Removes a device from a protection domain (with devtable_lock held)
2118 */
2119static void detach_device(struct device *dev)
2120{
2121	struct protection_domain *domain;
2122	struct iommu_dev_data *dev_data;
2123	unsigned long flags;
2124
2125	dev_data = get_dev_data(dev);
2126	domain   = dev_data->domain;
2127
2128	/* lock device table */
2129	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2130	__detach_device(dev_data);
2131	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2132
2133	if (!dev_is_pci(dev))
2134		return;
2135
2136	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
2137		pdev_iommuv2_disable(to_pci_dev(dev));
2138	else if (dev_data->ats.enabled)
2139		pci_disable_ats(to_pci_dev(dev));
2140
2141	dev_data->ats.enabled = false;
2142}
2143
2144static int amd_iommu_add_device(struct device *dev)
2145{
2146	struct iommu_dev_data *dev_data;
2147	struct iommu_domain *domain;
2148	struct amd_iommu *iommu;
2149	int ret, devid;
 
2150
2151	if (!check_device(dev) || get_dev_data(dev))
2152		return 0;
2153
2154	devid = get_device_id(dev);
2155	if (devid < 0)
2156		return devid;
2157
2158	iommu = amd_iommu_rlookup_table[devid];
2159
2160	ret = iommu_init_device(dev);
2161	if (ret) {
2162		if (ret != -ENOTSUPP)
2163			pr_err("Failed to initialize device %s - trying to proceed anyway\n",
2164				dev_name(dev));
2165
2166		iommu_ignore_device(dev);
2167		dev->archdata.dma_ops = &nommu_dma_ops;
2168		goto out;
2169	}
2170	init_iommu_group(dev);
2171
2172	dev_data = get_dev_data(dev);
2173
2174	BUG_ON(!dev_data);
2175
2176	if (iommu_pass_through || dev_data->iommu_v2)
2177		iommu_request_dm_for_dev(dev);
2178
2179	/* Domains are initialized for this device - have a look at what we ended up with */
2180	domain = iommu_get_domain_for_dev(dev);
2181	if (domain->type == IOMMU_DOMAIN_IDENTITY)
2182		dev_data->passthrough = true;
2183	else
2184		dev->archdata.dma_ops = &amd_iommu_dma_ops;
2185
2186out:
2187	iommu_completion_wait(iommu);
2188
2189	return 0;
2190}
2191
2192static void amd_iommu_remove_device(struct device *dev)
2193{
2194	struct amd_iommu *iommu;
2195	int devid;
2196
2197	if (!check_device(dev))
2198		return;
2199
2200	devid = get_device_id(dev);
2201	if (devid < 0)
2202		return;
2203
2204	iommu = amd_iommu_rlookup_table[devid];
2205
2206	iommu_uninit_device(dev);
2207	iommu_completion_wait(iommu);
2208}
2209
2210static struct iommu_group *amd_iommu_device_group(struct device *dev)
2211{
2212	if (dev_is_pci(dev))
2213		return pci_device_group(dev);
2214
2215	return acpihid_device_group(dev);
2216}
2217
2218/*****************************************************************************
2219 *
2220 * The next functions belong to the dma_ops mapping/unmapping code.
2221 *
2222 *****************************************************************************/
2223
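/*
 * Flush one per-cpu queue of deferred IOVA frees: invalidate the TLBs of
 * all known protection domains, wait for completion and only then return
 * the queued IOVA ranges to the allocator. Caller must hold queue->lock.
 */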
2224static void __queue_flush(struct flush_queue *queue)
2225{
2226	struct protection_domain *domain;
2227	unsigned long flags;
2228	int idx;
2229
2230	/* First flush TLB of all known domains */
2231	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
2232	list_for_each_entry(domain, &amd_iommu_pd_list, list)
2233		domain_flush_tlb(domain);
2234	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
2235
2236	/* Wait until flushes have completed */
2237	domain_flush_complete(NULL);
2238
2239	for (idx = 0; idx < queue->next; ++idx) {
2240		struct flush_queue_entry *entry;
2241
2242		entry = queue->entries + idx;
2243
2244		free_iova_fast(&entry->dma_dom->iovad,
2245				entry->iova_pfn,
2246				entry->pages);
2247
2248		/* Not really necessary, just to make sure we catch any bugs */
2249		entry->dma_dom = NULL;
2250	}
2251
2252	queue->next = 0;
2253}
2254
2255static void queue_flush_all(void)
2256{
2257	int cpu;
2258
2259	for_each_possible_cpu(cpu) {
2260		struct flush_queue *queue;
2261		unsigned long flags;
2262
2263		queue = per_cpu_ptr(&flush_queue, cpu);
2264		spin_lock_irqsave(&queue->lock, flags);
2265		if (queue->next > 0)
2266			__queue_flush(queue);
2267		spin_unlock_irqrestore(&queue->lock, flags);
2268	}
2269}
2270
2271static void queue_flush_timeout(unsigned long unused)
2272{
2273	atomic_set(&queue_timer_on, 0);
2274	queue_flush_all();
2275}
2276
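/*
 * Defer the freeing of an IOVA range from the lazy unmap path. The range
 * stays allocated (and thus cannot be handed out again) until the queue
 * is flushed, either because it ran full or because the flush timer fired.
 */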
2277static void queue_add(struct dma_ops_domain *dma_dom,
2278		      unsigned long address, unsigned long pages)
2279{
2280	struct flush_queue_entry *entry;
2281	struct flush_queue *queue;
2282	unsigned long flags;
2283	int idx;
2284
2285	pages     = __roundup_pow_of_two(pages);
2286	address >>= PAGE_SHIFT;
2287
2288	queue = get_cpu_ptr(&flush_queue);
2289	spin_lock_irqsave(&queue->lock, flags);
2290
2291	if (queue->next == FLUSH_QUEUE_SIZE)
2292		__queue_flush(queue);
2293
2294	idx   = queue->next++;
2295	entry = queue->entries + idx;
2296
2297	entry->iova_pfn = address;
2298	entry->pages    = pages;
2299	entry->dma_dom  = dma_dom;
2300
2301	spin_unlock_irqrestore(&queue->lock, flags);
2302
2303	if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0)
2304		mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10));
2305
2306	put_cpu_ptr(&flush_queue);
2307}
2308
2309
2310/*
2311 * In the dma_ops path we only have the struct device. This function
2312 * finds the corresponding IOMMU, the protection domain and the
2313 * requestor id for a given device.
2314 * If the device is not yet associated with a domain this is also done
2315 * in this function.
2316 */
2317static struct protection_domain *get_domain(struct device *dev)
2318{
2319	struct protection_domain *domain;
 
2320
2321	if (!check_device(dev))
2322		return ERR_PTR(-EINVAL);
2323
2324	domain = get_dev_data(dev)->domain;
2325	if (!dma_ops_domain(domain))
2326		return ERR_PTR(-EBUSY);
2327
2328	return domain;
2329}
2330
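/*
 * Re-write the device table entries of all devices attached to @domain,
 * e.g. after the paging mode or the GCR3 table of the domain has changed.
 */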
2331static void update_device_table(struct protection_domain *domain)
2332{
2333	struct iommu_dev_data *dev_data;
2334
2335	list_for_each_entry(dev_data, &domain->dev_list, list) {
2336		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
2337
2338		if (dev_data->devid == dev_data->alias)
2339			continue;
2340
2341		/* There is an alias, update device table entry for it */
2342		set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
2343	}
2344}
2345
2346static void update_domain(struct protection_domain *domain)
2347{
2348	if (!domain->updated)
2349		return;
2350
2351	update_device_table(domain);
2352
2353	domain_flush_devices(domain);
2354	domain_flush_tlb_pde(domain);
2355
2356	domain->updated = false;
2357}
2358
2359static int dir2prot(enum dma_data_direction direction)
2360{
2361	if (direction == DMA_TO_DEVICE)
2362		return IOMMU_PROT_IR;
2363	else if (direction == DMA_FROM_DEVICE)
2364		return IOMMU_PROT_IW;
2365	else if (direction == DMA_BIDIRECTIONAL)
2366		return IOMMU_PROT_IW | IOMMU_PROT_IR;
2367	else
2368		return 0;
2369}
 
2370/*
2371 * This function contains common code for mapping of a physically
2372 * contiguous memory region into DMA address space. It is used by all
2373 * mapping functions provided with this IOMMU driver.
2374 * Must be called with the domain lock held.
2375 */
2376static dma_addr_t __map_single(struct device *dev,
2377			       struct dma_ops_domain *dma_dom,
2378			       phys_addr_t paddr,
2379			       size_t size,
2380			       enum dma_data_direction direction,
2381			       u64 dma_mask)
2382{
2383	dma_addr_t offset = paddr & ~PAGE_MASK;
2384	dma_addr_t address, start, ret;
2385	unsigned int pages;
2386	int prot = 0;
2387	int i;
2388
2389	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
2390	paddr &= PAGE_MASK;
2391
2392	address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
2393	if (address == DMA_ERROR_CODE)
2394		goto out;
2395
2396	prot = dir2prot(direction);
2397
2398	start = address;
2399	for (i = 0; i < pages; ++i) {
2400		ret = iommu_map_page(&dma_dom->domain, start, paddr,
2401				     PAGE_SIZE, prot, GFP_ATOMIC);
2402		if (ret)
2403			goto out_unmap;
2404
2405		paddr += PAGE_SIZE;
2406		start += PAGE_SIZE;
2407	}
2408	address += offset;
2409
2410	if (unlikely(amd_iommu_np_cache)) {
2411		domain_flush_pages(&dma_dom->domain, address, size);
2412		domain_flush_complete(&dma_dom->domain);
2413	}
2414
2415out:
2416	return address;
2417
2418out_unmap:
2419
2420	for (--i; i >= 0; --i) {
2421		start -= PAGE_SIZE;
2422		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
2423	}
2424
2425	domain_flush_tlb(&dma_dom->domain);
2426	domain_flush_complete(&dma_dom->domain);
2427
2428	dma_ops_free_iova(dma_dom, address, pages);
2429
2430	return DMA_ERROR_CODE;
2431}
2432
2433/*
2434 * Does the reverse of the __map_single function. Must be called with
2435 * the domain lock held too
2436 */
2437static void __unmap_single(struct dma_ops_domain *dma_dom,
2438			   dma_addr_t dma_addr,
2439			   size_t size,
2440			   int dir)
2441{
2442	dma_addr_t flush_addr;
2443	dma_addr_t i, start;
2444	unsigned int pages;
2445
2446	flush_addr = dma_addr;
2447	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
2448	dma_addr &= PAGE_MASK;
2449	start = dma_addr;
2450
2451	for (i = 0; i < pages; ++i) {
2452		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
2453		start += PAGE_SIZE;
2454	}
2455
2456	if (amd_iommu_unmap_flush) {
2457		dma_ops_free_iova(dma_dom, dma_addr, pages);
2458		domain_flush_tlb(&dma_dom->domain);
2459		domain_flush_complete(&dma_dom->domain);
2460	} else {
2461		queue_add(dma_dom, dma_addr, pages);
2462	}
2463}
2464
2465/*
2466 * The exported map_single function for dma_ops.
2467 */
2468static dma_addr_t map_page(struct device *dev, struct page *page,
2469			   unsigned long offset, size_t size,
2470			   enum dma_data_direction dir,
2471			   unsigned long attrs)
2472{
2473	phys_addr_t paddr = page_to_phys(page) + offset;
2474	struct protection_domain *domain;
2475	struct dma_ops_domain *dma_dom;
2476	u64 dma_mask;
2477
2478	domain = get_domain(dev);
2479	if (PTR_ERR(domain) == -EINVAL)
2480		return (dma_addr_t)paddr;
2481	else if (IS_ERR(domain))
2482		return DMA_ERROR_CODE;
2483
2484	dma_mask = *dev->dma_mask;
2485	dma_dom = to_dma_ops_domain(domain);
2486
2487	return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
 
2488}
2489
2490/*
2491 * The exported unmap_single function for dma_ops.
2492 */
2493static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2494		       enum dma_data_direction dir, unsigned long attrs)
2495{
2496	struct protection_domain *domain;
2497	struct dma_ops_domain *dma_dom;
 
2498
2499	domain = get_domain(dev);
2500	if (IS_ERR(domain))
2501		return;
2502
2503	dma_dom = to_dma_ops_domain(domain);
2504
2505	__unmap_single(dma_dom, dma_addr, size, dir);
2506}
2507
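/*
 * Calculate how many IOVA pages are needed to map a scatterlist, honouring
 * the device's segment boundary mask. As a side effect each entry's
 * s->dma_address is set to its byte offset within the future contiguous
 * IOVA range; map_sg() later adds the allocated base address to it.
 */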
2508static int sg_num_pages(struct device *dev,
2509			struct scatterlist *sglist,
2510			int nelems)
2511{
2512	unsigned long mask, boundary_size;
2513	struct scatterlist *s;
2514	int i, npages = 0;
2515
2516	mask          = dma_get_seg_boundary(dev);
2517	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
2518				   1UL << (BITS_PER_LONG - PAGE_SHIFT);
2519
2520	for_each_sg(sglist, s, nelems, i) {
2521		int p, n;
2522
2523		s->dma_address = npages << PAGE_SHIFT;
2524		p = npages % boundary_size;
2525		n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
2526		if (p + n > boundary_size)
2527			npages += boundary_size - p;
2528		npages += n;
2529	}
2530
2531	return npages;
2532}
2533
2534/*
2535 * The exported map_sg function for dma_ops (handles scatter-gather
2536 * lists).
2537 */
2538static int map_sg(struct device *dev, struct scatterlist *sglist,
2539		  int nelems, enum dma_data_direction direction,
2540		  unsigned long attrs)
2541{
2542	int mapped_pages = 0, npages = 0, prot = 0, i;
2543	struct protection_domain *domain;
2544	struct dma_ops_domain *dma_dom;
2545	struct scatterlist *s;
2546	unsigned long address;
 
2547	u64 dma_mask;
2548
2549	domain = get_domain(dev);
2550	if (IS_ERR(domain))
2551		return 0;
2552
2553	dma_dom  = to_dma_ops_domain(domain);
2554	dma_mask = *dev->dma_mask;
2555
2556	npages = sg_num_pages(dev, sglist, nelems);
2557
2558	address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
2559	if (address == DMA_ERROR_CODE)
2560		goto out_err;
2561
2562	prot = dir2prot(direction);
2563
2564	/* Map all sg entries */
2565	for_each_sg(sglist, s, nelems, i) {
2566		int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
2567
2568		for (j = 0; j < pages; ++j) {
2569			unsigned long bus_addr, phys_addr;
2570			int ret;
2571
2572			bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
2573			phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
2574			ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
2575			if (ret)
2576				goto out_unmap;
2577
2578			mapped_pages += 1;
2579		}
2580	}
2581
2582	/* Everything is mapped - write the right values into s->dma_address */
2583	for_each_sg(sglist, s, nelems, i) {
2584		s->dma_address += address + s->offset;
2585		s->dma_length   = s->length;
2586	}
2587
2588	return nelems;
2589
2590out_unmap:
2591	pr_err("%s: IOMMU mapping error in map_sg (io-pages: %d)\n",
2592	       dev_name(dev), npages);
2593
2594	for_each_sg(sglist, s, nelems, i) {
2595		int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
2596
2597		for (j = 0; j < pages; ++j) {
2598			unsigned long bus_addr;
2599
2600			bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
2601			iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
2602
2603			if (--mapped_pages == 0)
2604				goto out_free_iova;
2605		}
2606	}
2607
2608out_free_iova:
2609	free_iova_fast(&dma_dom->iovad, address, npages);
2610
2611out_err:
2612	return 0;
2613}
2614
2615/*
2616 * The exported unmap_sg function for dma_ops (handles scatter-gather
2617 * lists).
2618 */
2619static void unmap_sg(struct device *dev, struct scatterlist *sglist,
2620		     int nelems, enum dma_data_direction dir,
2621		     unsigned long attrs)
2622{
2623	struct protection_domain *domain;
2624	struct dma_ops_domain *dma_dom;
2625	unsigned long startaddr;
2626	int npages = 2;
 
2627
2628	domain = get_domain(dev);
2629	if (IS_ERR(domain))
2630		return;
2631
2632	startaddr = sg_dma_address(sglist) & PAGE_MASK;
2633	dma_dom   = to_dma_ops_domain(domain);
2634	npages    = sg_num_pages(dev, sglist, nelems);
2635
2636	__unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
2637}
2638
2639/*
2640 * The exported alloc_coherent function for dma_ops.
2641 */
2642static void *alloc_coherent(struct device *dev, size_t size,
2643			    dma_addr_t *dma_addr, gfp_t flag,
2644			    unsigned long attrs)
2645{
2646	u64 dma_mask = dev->coherent_dma_mask;
2647	struct protection_domain *domain;
2648	struct dma_ops_domain *dma_dom;
2649	struct page *page;
2650
2651	domain = get_domain(dev);
2652	if (PTR_ERR(domain) == -EINVAL) {
2653		page = alloc_pages(flag, get_order(size));
2654		*dma_addr = page_to_phys(page);
2655		return page_address(page);
2656	} else if (IS_ERR(domain))
2657		return NULL;
2658
2659	dma_dom   = to_dma_ops_domain(domain);
2660	size	  = PAGE_ALIGN(size);
2661	dma_mask  = dev->coherent_dma_mask;
2662	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2663	flag     |= __GFP_ZERO;
2664
2665	page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
2666	if (!page) {
2667		if (!gfpflags_allow_blocking(flag))
2668			return NULL;
2669
2670		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
2671						 get_order(size));
2672		if (!page)
2673			return NULL;
2674	}
2675
2676	if (!dma_mask)
2677		dma_mask = *dev->dma_mask;
2678
2679	*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
2680				 size, DMA_BIDIRECTIONAL, dma_mask);
2681
2682	if (*dma_addr == DMA_ERROR_CODE)
2683		goto out_free;
2684
2685	return page_address(page);
2686
2687out_free:
2688
2689	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2690		__free_pages(page, get_order(size));
2691
2692	return NULL;
2693}
2694
2695/*
2696 * The exported free_coherent function for dma_ops.
2697 */
2698static void free_coherent(struct device *dev, size_t size,
2699			  void *virt_addr, dma_addr_t dma_addr,
2700			  unsigned long attrs)
2701{
2702	struct protection_domain *domain;
2703	struct dma_ops_domain *dma_dom;
2704	struct page *page;
2705
2706	page = virt_to_page(virt_addr);
2707	size = PAGE_ALIGN(size);
2708
2709	domain = get_domain(dev);
2710	if (IS_ERR(domain))
2711		goto free_mem;
2712
2713	dma_dom = to_dma_ops_domain(domain);
2714
2715	__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
2716
2717free_mem:
2718	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2719		__free_pages(page, get_order(size));
2720}
2721
2722/*
2723 * This function is called by the DMA layer to find out if we can handle a
2724 * particular device. It is part of the dma_ops.
2725 */
2726static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2727{
2728	return check_device(dev);
2729}
2730
2731static struct dma_map_ops amd_iommu_dma_ops = {
2732	.alloc		= alloc_coherent,
2733	.free		= free_coherent,
2734	.map_page	= map_page,
2735	.unmap_page	= unmap_page,
2736	.map_sg		= map_sg,
2737	.unmap_sg	= unmap_sg,
2738	.dma_supported	= amd_iommu_dma_supported,
2739};
2740
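/*
 * Reserve IOVA ranges that must never be handed out as DMA addresses:
 * the MSI and HyperTransport windows and all PCI MMIO resources, which
 * would otherwise alias device-visible DMA addresses.
 */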
2741static int init_reserved_iova_ranges(void)
2742{
2743	struct pci_dev *pdev = NULL;
2744	struct iova *val;
2745
2746	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
2747			 IOVA_START_PFN, DMA_32BIT_PFN);
 
2748
2749	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
2750			  &reserved_rbtree_key);
2751
2752	/* MSI memory range */
2753	val = reserve_iova(&reserved_iova_ranges,
2754			   IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
2755	if (!val) {
2756		pr_err("Reserving MSI range failed\n");
2757		return -ENOMEM;
2758	}
2759
2760	/* HT memory range */
2761	val = reserve_iova(&reserved_iova_ranges,
2762			   IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
2763	if (!val) {
2764		pr_err("Reserving HT range failed\n");
2765		return -ENOMEM;
2766	}
2767
2768	/*
2769	 * Memory used for PCI resources
2770	 * FIXME: Check whether we can reserve the PCI-hole completely
2771	 */
2772	for_each_pci_dev(pdev) {
2773		int i;
2774
2775		for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
2776			struct resource *r = &pdev->resource[i];
2777
2778			if (!(r->flags & IORESOURCE_MEM))
2779				continue;
2780
2781			val = reserve_iova(&reserved_iova_ranges,
2782					   IOVA_PFN(r->start),
2783					   IOVA_PFN(r->end));
2784			if (!val) {
2785				pr_err("Reserve pci-resource range failed\n");
2786				return -ENOMEM;
2787			}
2788		}
2789	}
2790
2791	return 0;
2792}
2793
2794int __init amd_iommu_init_api(void)
2795{
2796	int ret, cpu, err = 0;
2797
2798	ret = iova_cache_get();
2799	if (ret)
2800		return ret;
2801
2802	ret = init_reserved_iova_ranges();
2803	if (ret)
2804		return ret;
2805
2806	for_each_possible_cpu(cpu) {
2807		struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
2808
2809		queue->entries = kzalloc(FLUSH_QUEUE_SIZE *
2810					 sizeof(*queue->entries),
2811					 GFP_KERNEL);
2812		if (!queue->entries)
2813			goto out_put_iova;
2814
2815		spin_lock_init(&queue->lock);
2816	}
2817
2818	err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
2819	if (err)
2820		return err;
2821#ifdef CONFIG_ARM_AMBA
2822	err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
2823	if (err)
2824		return err;
2825#endif
2826	err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
2827	if (err)
2828		return err;
2829	return 0;
2830
2831out_put_iova:
2832	for_each_possible_cpu(cpu) {
2833		struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
2834
2835		kfree(queue->entries);
2836	}
2837
2838	return -ENOMEM;
2839}
2840
2841int __init amd_iommu_init_dma_ops(void)
2842{
2843	setup_timer(&queue_timer, queue_flush_timeout, 0);
2844	atomic_set(&queue_timer_on, 0);
2845
2846	swiotlb        = iommu_pass_through ? 1 : 0;
2847	iommu_detected = 1;
2848
2849	/*
2850	 * In case we don't initialize SWIOTLB (actually the common case
2851	 * when the AMD IOMMU is enabled), make sure the global dma_ops
2852	 * are set as a fall-back for devices not handled by this
2853	 * driver (for example non-PCI devices).
2854	 */
2855	if (!swiotlb)
2856		dma_ops = &nommu_dma_ops;
2857
2858	if (amd_iommu_unmap_flush)
2859		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
2860	else
2861		pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
2862
2863	return 0;
2864
2865}
2866
2867/*****************************************************************************
2868 *
2869 * The following functions belong to the exported interface of AMD IOMMU
2870 *
2871 * This interface allows access to lower level functions of the IOMMU
2872 * like protection domain handling and assignment of devices to domains
2873 * which is not possible with the dma_ops interface.
2874 *
2875 *****************************************************************************/
2876
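/*
 * Detach all devices that are still attached to @domain. Used before a
 * domain with a non-zero device count is freed.
 */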
2877static void cleanup_domain(struct protection_domain *domain)
2878{
2879	struct iommu_dev_data *entry;
2880	unsigned long flags;
2881
2882	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2883
2884	while (!list_empty(&domain->dev_list)) {
2885		entry = list_first_entry(&domain->dev_list,
2886					 struct iommu_dev_data, list);
2887		__detach_device(entry);
2888	}
2889
2890	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2891}
2892
2893static void protection_domain_free(struct protection_domain *domain)
2894{
2895	if (!domain)
2896		return;
2897
2898	del_domain_from_list(domain);
2899
2900	if (domain->id)
2901		domain_id_free(domain->id);
2902
2903	kfree(domain);
2904}
2905
2906static int protection_domain_init(struct protection_domain *domain)
2907{
2908	spin_lock_init(&domain->lock);
2909	mutex_init(&domain->api_lock);
2910	domain->id = domain_id_alloc();
2911	if (!domain->id)
2912		return -ENOMEM;
2913	INIT_LIST_HEAD(&domain->dev_list);
2914
2915	return 0;
2916}
2917
2918static struct protection_domain *protection_domain_alloc(void)
2919{
2920	struct protection_domain *domain;
2921
2922	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2923	if (!domain)
2924		return NULL;
2925
2926	if (protection_domain_init(domain))
2927		goto out_err;
2928
2929	add_domain_to_list(domain);
2930
2931	return domain;
2932
2933out_err:
2934	kfree(domain);
2935
2936	return NULL;
2937}
2938
2939static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2940{
2941	struct protection_domain *pdomain;
2942	struct dma_ops_domain *dma_domain;
2943
2944	switch (type) {
2945	case IOMMU_DOMAIN_UNMANAGED:
2946		pdomain = protection_domain_alloc();
2947		if (!pdomain)
2948			return NULL;
2949
2950		pdomain->mode    = PAGE_MODE_3_LEVEL;
2951		pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2952		if (!pdomain->pt_root) {
2953			protection_domain_free(pdomain);
2954			return NULL;
2955		}
2956
2957		pdomain->domain.geometry.aperture_start = 0;
2958		pdomain->domain.geometry.aperture_end   = ~0ULL;
2959		pdomain->domain.geometry.force_aperture = true;
2960
2961		break;
2962	case IOMMU_DOMAIN_DMA:
2963		dma_domain = dma_ops_domain_alloc();
2964		if (!dma_domain) {
2965			pr_err("AMD-Vi: Failed to allocate DMA-ops domain\n");
2966			return NULL;
2967		}
2968		pdomain = &dma_domain->domain;
2969		break;
2970	case IOMMU_DOMAIN_IDENTITY:
2971		pdomain = protection_domain_alloc();
2972		if (!pdomain)
2973			return NULL;
2974
2975		pdomain->mode = PAGE_MODE_NONE;
2976		break;
2977	default:
2978		return NULL;
2979	}
2980
2981	return &pdomain->domain;
2982}
2983
2984static void amd_iommu_domain_free(struct iommu_domain *dom)
2985{
2986	struct protection_domain *domain;
2987	struct dma_ops_domain *dma_dom;
2988
2989	domain = to_pdomain(dom);
2990
2991	if (domain->dev_cnt > 0)
2992		cleanup_domain(domain);
2993
2994	BUG_ON(domain->dev_cnt != 0);
2995
2996	if (!dom)
2997		return;
2998
2999	switch (dom->type) {
3000	case IOMMU_DOMAIN_DMA:
3001		/*
3002		 * First make sure the domain is no longer referenced from the
3003		 * flush queue
3004		 */
3005		queue_flush_all();
3006
3007		/* Now release the domain */
3008		dma_dom = to_dma_ops_domain(domain);
3009		dma_ops_domain_free(dma_dom);
3010		break;
3011	default:
3012		if (domain->mode != PAGE_MODE_NONE)
3013			free_pagetable(domain);
3014
3015		if (domain->flags & PD_IOMMUV2_MASK)
3016			free_gcr3_table(domain);
3017
3018		protection_domain_free(domain);
3019		break;
3020	}
3021}
3022
3023static void amd_iommu_detach_device(struct iommu_domain *dom,
3024				    struct device *dev)
3025{
3026	struct iommu_dev_data *dev_data = dev->archdata.iommu;
3027	struct amd_iommu *iommu;
3028	int devid;
3029
3030	if (!check_device(dev))
3031		return;
3032
3033	devid = get_device_id(dev);
3034	if (devid < 0)
3035		return;
3036
3037	if (dev_data->domain != NULL)
3038		detach_device(dev);
3039
3040	iommu = amd_iommu_rlookup_table[devid];
3041	if (!iommu)
3042		return;
3043
3044#ifdef CONFIG_IRQ_REMAP
3045	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
3046	    (dom->type == IOMMU_DOMAIN_UNMANAGED))
3047		dev_data->use_vapic = 0;
3048#endif
3049
3050	iommu_completion_wait(iommu);
3051}
3052
3053static int amd_iommu_attach_device(struct iommu_domain *dom,
3054				   struct device *dev)
3055{
3056	struct protection_domain *domain = to_pdomain(dom);
3057	struct iommu_dev_data *dev_data;
3058	struct amd_iommu *iommu;
3059	int ret;
3060
3061	if (!check_device(dev))
3062		return -EINVAL;
3063
3064	dev_data = dev->archdata.iommu;
3065
3066	iommu = amd_iommu_rlookup_table[dev_data->devid];
3067	if (!iommu)
3068		return -EINVAL;
3069
3070	if (dev_data->domain)
3071		detach_device(dev);
3072
3073	ret = attach_device(dev, domain);
3074
3075#ifdef CONFIG_IRQ_REMAP
3076	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
3077		if (dom->type == IOMMU_DOMAIN_UNMANAGED)
3078			dev_data->use_vapic = 1;
3079		else
3080			dev_data->use_vapic = 0;
3081	}
3082#endif
3083
3084	iommu_completion_wait(iommu);
3085
3086	return ret;
3087}
3088
3089static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
3090			 phys_addr_t paddr, size_t page_size, int iommu_prot)
3091{
3092	struct protection_domain *domain = to_pdomain(dom);
3093	int prot = 0;
3094	int ret;
3095
3096	if (domain->mode == PAGE_MODE_NONE)
3097		return -EINVAL;
3098
3099	if (iommu_prot & IOMMU_READ)
3100		prot |= IOMMU_PROT_IR;
3101	if (iommu_prot & IOMMU_WRITE)
3102		prot |= IOMMU_PROT_IW;
3103
3104	mutex_lock(&domain->api_lock);
3105	ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
3106	mutex_unlock(&domain->api_lock);
3107
3108	return ret;
3109}
3110
3111static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3112			   size_t page_size)
3113{
3114	struct protection_domain *domain = to_pdomain(dom);
3115	size_t unmap_size;
3116
3117	if (domain->mode == PAGE_MODE_NONE)
3118		return -EINVAL;
3119
3120	mutex_lock(&domain->api_lock);
3121	unmap_size = iommu_unmap_page(domain, iova, page_size);
3122	mutex_unlock(&domain->api_lock);
3123
3124	domain_flush_tlb_pde(domain);
3125
3126	return unmap_size;
3127}
3128
3129static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
3130					  dma_addr_t iova)
3131{
3132	struct protection_domain *domain = to_pdomain(dom);
3133	unsigned long offset_mask, pte_pgsize;
3134	u64 *pte, __pte;
3135
3136	if (domain->mode == PAGE_MODE_NONE)
3137		return iova;
3138
3139	pte = fetch_pte(domain, iova, &pte_pgsize);
3140
3141	if (!pte || !IOMMU_PTE_PRESENT(*pte))
3142		return 0;
3143
3144	offset_mask = pte_pgsize - 1;
3145	__pte	    = *pte & PM_ADDR_MASK;
3146
3147	return (__pte & ~offset_mask) | (iova & offset_mask);
3148}
3149
3150static bool amd_iommu_capable(enum iommu_cap cap)
3151{
3152	switch (cap) {
3153	case IOMMU_CAP_CACHE_COHERENCY:
3154		return true;
3155	case IOMMU_CAP_INTR_REMAP:
3156		return (irq_remapping_enabled == 1);
3157	case IOMMU_CAP_NOEXEC:
3158		return false;
3159	}
3160
3161	return false;
3162}
3163
3164static void amd_iommu_get_dm_regions(struct device *dev,
3165				     struct list_head *head)
3166{
3167	struct unity_map_entry *entry;
3168	int devid;
3169
3170	devid = get_device_id(dev);
3171	if (devid < 0)
3172		return;
3173
3174	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
3175		struct iommu_dm_region *region;
3176
3177		if (devid < entry->devid_start || devid > entry->devid_end)
3178			continue;
3179
3180		region = kzalloc(sizeof(*region), GFP_KERNEL);
3181		if (!region) {
3182			pr_err("Out of memory allocating dm-regions for %s\n",
3183				dev_name(dev));
3184			return;
3185		}
3186
3187		region->start = entry->address_start;
3188		region->length = entry->address_end - entry->address_start;
3189		if (entry->prot & IOMMU_PROT_IR)
3190			region->prot |= IOMMU_READ;
3191		if (entry->prot & IOMMU_PROT_IW)
3192			region->prot |= IOMMU_WRITE;
3193
3194		list_add_tail(&region->list, head);
3195	}
3196}
3197
3198static void amd_iommu_put_dm_regions(struct device *dev,
3199				     struct list_head *head)
3200{
3201	struct iommu_dm_region *entry, *next;
3202
3203	list_for_each_entry_safe(entry, next, head, list)
3204		kfree(entry);
3205}
3206
3207static void amd_iommu_apply_dm_region(struct device *dev,
3208				      struct iommu_domain *domain,
3209				      struct iommu_dm_region *region)
3210{
3211	struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
3212	unsigned long start, end;
3213
3214	start = IOVA_PFN(region->start);
3215	end   = IOVA_PFN(region->start + region->length);
3216
3217	WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
3218}
3219
3220static const struct iommu_ops amd_iommu_ops = {
3221	.capable = amd_iommu_capable,
3222	.domain_alloc = amd_iommu_domain_alloc,
3223	.domain_free  = amd_iommu_domain_free,
3224	.attach_dev = amd_iommu_attach_device,
3225	.detach_dev = amd_iommu_detach_device,
3226	.map = amd_iommu_map,
3227	.unmap = amd_iommu_unmap,
3228	.map_sg = default_iommu_map_sg,
3229	.iova_to_phys = amd_iommu_iova_to_phys,
3230	.add_device = amd_iommu_add_device,
3231	.remove_device = amd_iommu_remove_device,
3232	.device_group = amd_iommu_device_group,
3233	.get_dm_regions = amd_iommu_get_dm_regions,
3234	.put_dm_regions = amd_iommu_put_dm_regions,
3235	.apply_dm_region = amd_iommu_apply_dm_region,
3236	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
3237};
3238
3239/*****************************************************************************
3240 *
3241 * The next functions do a basic initialization of IOMMU for pass through
3242 * mode
3243 *
3244 * In passthrough mode the IOMMU is initialized and enabled but not used for
3245 * DMA-API translation.
3246 *
3247 *****************************************************************************/
3248
3249/* IOMMUv2 specific functions */
3250int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3251{
3252	return atomic_notifier_chain_register(&ppr_notifier, nb);
3253}
3254EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
3255
3256int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
3257{
3258	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
3259}
3260EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
3261
3262void amd_iommu_domain_direct_map(struct iommu_domain *dom)
3263{
3264	struct protection_domain *domain = to_pdomain(dom);
3265	unsigned long flags;
3266
3267	spin_lock_irqsave(&domain->lock, flags);
3268
3269	/* Update data structure */
3270	domain->mode    = PAGE_MODE_NONE;
3271	domain->updated = true;
3272
3273	/* Make changes visible to IOMMUs */
3274	update_domain(domain);
3275
3276	/* Page-table is not visible to IOMMU anymore, so free it */
3277	free_pagetable(domain);
3278
3279	spin_unlock_irqrestore(&domain->lock, flags);
3280}
3281EXPORT_SYMBOL(amd_iommu_domain_direct_map);
3282
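/*
 * Switch @dom into IOMMUv2 (PASID) mode with room for @pasids PASIDs. The
 * domain must not have any devices attached when it is switched. A rough
 * usage sketch (illustrative only, error handling omitted) for an
 * IOMMUv2-capable driver could look like:
 *
 *	dom = iommu_get_domain_for_dev(&pdev->dev);
 *	amd_iommu_domain_enable_v2(dom, pasids);
 *	amd_iommu_domain_set_gcr3(dom, pasid, __pa(mm->pgd));
 */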
3283int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
3284{
3285	struct protection_domain *domain = to_pdomain(dom);
3286	unsigned long flags;
3287	int levels, ret;
3288
3289	if (pasids <= 0 || pasids > (PASID_MASK + 1))
3290		return -EINVAL;
3291
3292	/* Number of GCR3 table levels required */
3293	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
3294		levels += 1;
3295
3296	if (levels > amd_iommu_max_glx_val)
3297		return -EINVAL;
3298
3299	spin_lock_irqsave(&domain->lock, flags);
3300
3301	/*
3302	 * Spare us the sanity checks of whether the devices already in
3303	 * the domain support IOMMUv2. Just require that the domain has
3304	 * no devices attached when it is switched into IOMMUv2 mode.
3305	 */
3306	ret = -EBUSY;
3307	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
3308		goto out;
3309
3310	ret = -ENOMEM;
3311	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
3312	if (domain->gcr3_tbl == NULL)
3313		goto out;
3314
3315	domain->glx      = levels;
3316	domain->flags   |= PD_IOMMUV2_MASK;
3317	domain->updated  = true;
3318
3319	update_domain(domain);
3320
3321	ret = 0;
3322
3323out:
3324	spin_unlock_irqrestore(&domain->lock, flags);
3325
3326	return ret;
3327}
3328EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
3329
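/*
 * Flush all translations for a single PASID: first invalidate the IOMMU
 * TLBs of every IOMMU serving the domain and wait, then invalidate the
 * IOTLBs of all ATS-enabled devices in the domain and wait again.
 */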
3330static int __flush_pasid(struct protection_domain *domain, int pasid,
3331			 u64 address, bool size)
3332{
3333	struct iommu_dev_data *dev_data;
3334	struct iommu_cmd cmd;
3335	int i, ret;
3336
3337	if (!(domain->flags & PD_IOMMUV2_MASK))
3338		return -EINVAL;
3339
3340	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
3341
3342	/*
3343	 * IOMMU TLB needs to be flushed before Device TLB to
3344	 * prevent device TLB refill from IOMMU TLB
3345	 */
3346	for (i = 0; i < amd_iommus_present; ++i) {
3347		if (domain->dev_iommu[i] == 0)
3348			continue;
3349
3350		ret = iommu_queue_command(amd_iommus[i], &cmd);
3351		if (ret != 0)
3352			goto out;
3353	}
3354
3355	/* Wait until IOMMU TLB flushes are complete */
3356	domain_flush_complete(domain);
3357
3358	/* Now flush device TLBs */
3359	list_for_each_entry(dev_data, &domain->dev_list, list) {
3360		struct amd_iommu *iommu;
3361		int qdep;
3362
3363		/*
3364		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
3365		 * domain.
3366		 */
3367		if (!dev_data->ats.enabled)
3368			continue;
3369
3370		qdep  = dev_data->ats.qdep;
3371		iommu = amd_iommu_rlookup_table[dev_data->devid];
3372
3373		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3374				      qdep, address, size);
3375
3376		ret = iommu_queue_command(iommu, &cmd);
3377		if (ret != 0)
3378			goto out;
3379	}
3380
3381	/* Wait until all device TLBs are flushed */
3382	domain_flush_complete(domain);
3383
3384	ret = 0;
3385
3386out:
3387
3388	return ret;
3389}
3390
3391static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
3392				  u64 address)
3393{
3394	return __flush_pasid(domain, pasid, address, false);
3395}
3396
3397int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
3398			 u64 address)
3399{
3400	struct protection_domain *domain = to_pdomain(dom);
3401	unsigned long flags;
3402	int ret;
3403
3404	spin_lock_irqsave(&domain->lock, flags);
3405	ret = __amd_iommu_flush_page(domain, pasid, address);
3406	spin_unlock_irqrestore(&domain->lock, flags);
3407
3408	return ret;
3409}
3410EXPORT_SYMBOL(amd_iommu_flush_page);
3411
3412static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
3413{
3414	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
3415			     true);
3416}
3417
3418int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
3419{
3420	struct protection_domain *domain = to_pdomain(dom);
3421	unsigned long flags;
3422	int ret;
3423
3424	spin_lock_irqsave(&domain->lock, flags);
3425	ret = __amd_iommu_flush_tlb(domain, pasid);
3426	spin_unlock_irqrestore(&domain->lock, flags);
3427
3428	return ret;
3429}
3430EXPORT_SYMBOL(amd_iommu_flush_tlb);
3431
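/*
 * Walk (and, if @alloc is set, build) the GCR3 table down to the leaf
 * entry for @pasid. Each table level is a zeroed page of 512 entries
 * indexed by a 9-bit chunk of the PASID, mirroring a page-table walk.
 */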
3432static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
3433{
3434	int index;
3435	u64 *pte;
3436
3437	while (true) {
3438
3439		index = (pasid >> (9 * level)) & 0x1ff;
3440		pte   = &root[index];
3441
3442		if (level == 0)
3443			break;
3444
3445		if (!(*pte & GCR3_VALID)) {
3446			if (!alloc)
3447				return NULL;
3448
3449			root = (void *)get_zeroed_page(GFP_ATOMIC);
3450			if (root == NULL)
3451				return NULL;
3452
3453			*pte = __pa(root) | GCR3_VALID;
3454		}
3455
3456		root = __va(*pte & PAGE_MASK);
3457
3458		level -= 1;
3459	}
3460
3461	return pte;
3462}
3463
3464static int __set_gcr3(struct protection_domain *domain, int pasid,
3465		      unsigned long cr3)
3466{
3467	u64 *pte;
3468
3469	if (domain->mode != PAGE_MODE_NONE)
3470		return -EINVAL;
3471
3472	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
3473	if (pte == NULL)
3474		return -ENOMEM;
3475
3476	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;
3477
3478	return __amd_iommu_flush_tlb(domain, pasid);
3479}
3480
3481static int __clear_gcr3(struct protection_domain *domain, int pasid)
3482{
3483	u64 *pte;
3484
3485	if (domain->mode != PAGE_MODE_NONE)
3486		return -EINVAL;
3487
3488	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
3489	if (pte == NULL)
3490		return 0;
3491
3492	*pte = 0;
3493
3494	return __amd_iommu_flush_tlb(domain, pasid);
3495}
3496
3497int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
3498			      unsigned long cr3)
3499{
3500	struct protection_domain *domain = to_pdomain(dom);
3501	unsigned long flags;
3502	int ret;
3503
3504	spin_lock_irqsave(&domain->lock, flags);
3505	ret = __set_gcr3(domain, pasid, cr3);
3506	spin_unlock_irqrestore(&domain->lock, flags);
3507
3508	return ret;
3509}
3510EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
3511
3512int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
3513{
3514	struct protection_domain *domain = to_pdomain(dom);
3515	unsigned long flags;
3516	int ret;
3517
3518	spin_lock_irqsave(&domain->lock, flags);
3519	ret = __clear_gcr3(domain, pasid);
3520	spin_unlock_irqrestore(&domain->lock, flags);
3521
3522	return ret;
3523}
3524EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
3525
3526int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
3527			   int status, int tag)
3528{
3529	struct iommu_dev_data *dev_data;
3530	struct amd_iommu *iommu;
3531	struct iommu_cmd cmd;
3532
3533	dev_data = get_dev_data(&pdev->dev);
3534	iommu    = amd_iommu_rlookup_table[dev_data->devid];
3535
3536	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
3537			   tag, dev_data->pri_tlp);
3538
3539	return iommu_queue_command(iommu, &cmd);
3540}
3541EXPORT_SYMBOL(amd_iommu_complete_ppr);
3542
3543struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
3544{
3545	struct protection_domain *pdomain;
3546
3547	pdomain = get_domain(&pdev->dev);
3548	if (IS_ERR(pdomain))
3549		return NULL;
3550
3551	/* Only return IOMMUv2 domains */
3552	if (!(pdomain->flags & PD_IOMMUV2_MASK))
3553		return NULL;
3554
3555	return &pdomain->domain;
3556}
3557EXPORT_SYMBOL(amd_iommu_get_v2_domain);
3558
3559void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
3560{
3561	struct iommu_dev_data *dev_data;
3562
3563	if (!amd_iommu_v2_supported())
3564		return;
3565
3566	dev_data = get_dev_data(&pdev->dev);
3567	dev_data->errata |= (1 << erratum);
3568}
3569EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
3570
3571int amd_iommu_device_info(struct pci_dev *pdev,
3572                          struct amd_iommu_device_info *info)
3573{
3574	int max_pasids;
3575	int pos;
3576
3577	if (pdev == NULL || info == NULL)
3578		return -EINVAL;
3579
3580	if (!amd_iommu_v2_supported())
3581		return -EINVAL;
3582
3583	memset(info, 0, sizeof(*info));
3584
3585	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3586	if (pos)
3587		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3588
3589	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3590	if (pos)
3591		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
3592
3593	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
3594	if (pos) {
3595		int features;
3596
3597		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
3598		max_pasids = min(max_pasids, (1 << 20));
3599
3600		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
3601		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
3602
3603		features = pci_pasid_features(pdev);
3604		if (features & PCI_PASID_CAP_EXEC)
3605			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
3606		if (features & PCI_PASID_CAP_PRIV)
3607			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
3608	}
3609
3610	return 0;
3611}
3612EXPORT_SYMBOL(amd_iommu_device_info);
3613
3614#ifdef CONFIG_IRQ_REMAP
3615
3616/*****************************************************************************
3617 *
3618 * Interrupt Remapping Implementation
3619 *
3620 *****************************************************************************/
3621
3622static struct irq_chip amd_ir_chip;
3623
3624#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
3625#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
3626#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
3627#define DTE_IRQ_REMAP_ENABLE    1ULL
3628
3629static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3630{
3631	u64 dte;
3632
3633	dte	= amd_iommu_dev_table[devid].data[2];
3634	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
3635	dte	|= virt_to_phys(table->table);
3636	dte	|= DTE_IRQ_REMAP_INTCTL;
3637	dte	|= DTE_IRQ_TABLE_LEN;
3638	dte	|= DTE_IRQ_REMAP_ENABLE;
3639
3640	amd_iommu_dev_table[devid].data[2] = dte;
3641}
3642
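/*
 * Look up the interrupt remapping table for @devid, allocating it and
 * wiring it into the device table (for the device and its alias) on first
 * use. For IO-APICs the first 32 indexes are pre-allocated so the pin
 * number can be used as the remapping index directly.
 */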
3643static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3644{
3645	struct irq_remap_table *table = NULL;
3646	struct amd_iommu *iommu;
3647	unsigned long flags;
3648	u16 alias;
3649
3650	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3651
3652	iommu = amd_iommu_rlookup_table[devid];
3653	if (!iommu)
3654		goto out_unlock;
3655
3656	table = irq_lookup_table[devid];
3657	if (table)
3658		goto out_unlock;
3659
3660	alias = amd_iommu_alias_table[devid];
3661	table = irq_lookup_table[alias];
3662	if (table) {
3663		irq_lookup_table[devid] = table;
3664		set_dte_irq_entry(devid, table);
3665		iommu_flush_dte(iommu, devid);
3666		goto out;
3667	}
3668
3669	/* Nothing there yet, allocate new irq remapping table */
3670	table = kzalloc(sizeof(*table), GFP_ATOMIC);
3671	if (!table)
3672		goto out_unlock;
3673
3674	/* Initialize table spin-lock */
3675	spin_lock_init(&table->lock);
3676
3677	if (ioapic)
3678		/* Keep the first 32 indexes free for IOAPIC interrupts */
3679		table->min_index = 32;
3680
3681	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
3682	if (!table->table) {
3683		kfree(table);
3684		table = NULL;
3685		goto out_unlock;
3686	}
3687
3688	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3689		memset(table->table, 0,
3690		       MAX_IRQS_PER_TABLE * sizeof(u32));
3691	else
3692		memset(table->table, 0,
3693		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
3694
3695	if (ioapic) {
3696		int i;
3697
3698		for (i = 0; i < 32; ++i)
3699			iommu->irte_ops->set_allocated(table, i);
3700	}
3701
3702	irq_lookup_table[devid] = table;
3703	set_dte_irq_entry(devid, table);
3704	iommu_flush_dte(iommu, devid);
3705	if (devid != alias) {
3706		irq_lookup_table[alias] = table;
3707		set_dte_irq_entry(alias, table);
3708		iommu_flush_dte(iommu, alias);
3709	}
3710
3711out:
3712	iommu_completion_wait(iommu);
3713
3714out_unlock:
3715	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3716
3717	return table;
3718}
3719
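/*
 * Find @count consecutive free entries in the device's remapping table,
 * mark them as allocated and return the index of the first one, or
 * -ENOSPC if no large enough run of free entries exists.
 */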
3720static int alloc_irq_index(u16 devid, int count)
3721{
3722	struct irq_remap_table *table;
3723	unsigned long flags;
3724	int index, c;
3725	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3726
3727	if (!iommu)
3728		return -ENODEV;
3729
3730	table = get_irq_table(devid, false);
3731	if (!table)
3732		return -ENODEV;
3733
3734	spin_lock_irqsave(&table->lock, flags);
3735
3736	/* Scan table for free entries */
3737	for (c = 0, index = table->min_index;
3738	     index < MAX_IRQS_PER_TABLE;
3739	     ++index) {
3740		if (!iommu->irte_ops->is_allocated(table, index))
3741			c += 1;
3742		else
3743			c = 0;
3744
3745		if (c == count)	{
3746			for (; c != 0; --c)
3747				iommu->irte_ops->set_allocated(table, index - c + 1);
3748
3749			index -= count - 1;
3750			goto out;
3751		}
3752	}
3753
3754	index = -ENOSPC;
3755
3756out:
3757	spin_unlock_irqrestore(&table->lock, flags);
3758
3759	return index;
3760}
3761
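/*
 * Update a 128-bit (GA mode) IRTE in place: the valid bit is cleared
 * before the two 64-bit halves are rewritten and set again afterwards,
 * then the interrupt remapping table is flushed on the IOMMU.
 */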
3762static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
3763			  struct amd_ir_data *data)
3764{
3765	struct irq_remap_table *table;
3766	struct amd_iommu *iommu;
3767	unsigned long flags;
3768	struct irte_ga *entry;
3769
3770	iommu = amd_iommu_rlookup_table[devid];
3771	if (iommu == NULL)
3772		return -EINVAL;
3773
3774	table = get_irq_table(devid, false);
3775	if (!table)
3776		return -ENOMEM;
3777
3778	spin_lock_irqsave(&table->lock, flags);
3779
3780	entry = (struct irte_ga *)table->table;
3781	entry = &entry[index];
3782	entry->lo.fields_remap.valid = 0;
3783	entry->hi.val = irte->hi.val;
3784	entry->lo.val = irte->lo.val;
3785	entry->lo.fields_remap.valid = 1;
3786	if (data)
3787		data->ref = entry;
3788
3789	spin_unlock_irqrestore(&table->lock, flags);
3790
3791	iommu_flush_irt(iommu, devid);
3792	iommu_completion_wait(iommu);
3793
3794	return 0;
3795}
3796
3797static int modify_irte(u16 devid, int index, union irte *irte)
3798{
3799	struct irq_remap_table *table;
3800	struct amd_iommu *iommu;
3801	unsigned long flags;
3802
3803	iommu = amd_iommu_rlookup_table[devid];
3804	if (iommu == NULL)
3805		return -EINVAL;
3806
3807	table = get_irq_table(devid, false);
3808	if (!table)
3809		return -ENOMEM;
3810
3811	spin_lock_irqsave(&table->lock, flags);
3812	table->table[index] = irte->val;
3813	spin_unlock_irqrestore(&table->lock, flags);
3814
3815	iommu_flush_irt(iommu, devid);
3816	iommu_completion_wait(iommu);
3817
3818	return 0;
3819}
3820
3821static void free_irte(u16 devid, int index)
3822{
3823	struct irq_remap_table *table;
3824	struct amd_iommu *iommu;
3825	unsigned long flags;
3826
3827	iommu = amd_iommu_rlookup_table[devid];
3828	if (iommu == NULL)
3829		return;
3830
3831	table = get_irq_table(devid, false);
3832	if (!table)
3833		return;
3834
3835	spin_lock_irqsave(&table->lock, flags);
3836	iommu->irte_ops->clear_allocated(table, index);
3837	spin_unlock_irqrestore(&table->lock, flags);
3838
3839	iommu_flush_irt(iommu, devid);
3840	iommu_completion_wait(iommu);
3841}
3842
3843static void irte_prepare(void *entry,
3844			 u32 delivery_mode, u32 dest_mode,
3845			 u8 vector, u32 dest_apicid, int devid)
3846{
3847	union irte *irte = (union irte *) entry;
3848
3849	irte->val                = 0;
3850	irte->fields.vector      = vector;
3851	irte->fields.int_type    = delivery_mode;
3852	irte->fields.destination = dest_apicid;
3853	irte->fields.dm          = dest_mode;
3854	irte->fields.valid       = 1;
3855}
3856
3857static void irte_ga_prepare(void *entry,
3858			    u32 delivery_mode, u32 dest_mode,
3859			    u8 vector, u32 dest_apicid, int devid)
3860{
3861	struct irte_ga *irte = (struct irte_ga *) entry;
3862	struct iommu_dev_data *dev_data = search_dev_data(devid);
3863
3864	irte->lo.val                      = 0;
3865	irte->hi.val                      = 0;
3866	irte->lo.fields_remap.guest_mode  = dev_data ? dev_data->use_vapic : 0;
3867	irte->lo.fields_remap.int_type    = delivery_mode;
3868	irte->lo.fields_remap.dm          = dest_mode;
3869	irte->hi.fields.vector            = vector;
3870	irte->lo.fields_remap.destination = dest_apicid;
3871	irte->lo.fields_remap.valid       = 1;
3872}
3873
3874static void irte_activate(void *entry, u16 devid, u16 index)
3875{
3876	union irte *irte = (union irte *) entry;
3877
3878	irte->fields.valid = 1;
3879	modify_irte(devid, index, irte);
3880}
3881
3882static void irte_ga_activate(void *entry, u16 devid, u16 index)
3883{
3884	struct irte_ga *irte = (struct irte_ga *) entry;
3885
3886	irte->lo.fields_remap.valid = 1;
3887	modify_irte_ga(devid, index, irte, NULL);
3888}
3889
3890static void irte_deactivate(void *entry, u16 devid, u16 index)
3891{
3892	union irte *irte = (union irte *) entry;
3893
3894	irte->fields.valid = 0;
3895	modify_irte(devid, index, irte);
3896}
3897
3898static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
3899{
3900	struct irte_ga *irte = (struct irte_ga *) entry;
3901
3902	irte->lo.fields_remap.valid = 0;
3903	modify_irte_ga(devid, index, irte, NULL);
3904}
3905
3906static void irte_set_affinity(void *entry, u16 devid, u16 index,
3907			      u8 vector, u32 dest_apicid)
3908{
3909	union irte *irte = (union irte *) entry;
3910
3911	irte->fields.vector = vector;
3912	irte->fields.destination = dest_apicid;
3913	modify_irte(devid, index, irte);
3914}
3915
3916static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
3917				 u8 vector, u32 dest_apicid)
3918{
3919	struct irte_ga *irte = (struct irte_ga *) entry;
3920	struct iommu_dev_data *dev_data = search_dev_data(devid);
3921
3922	if (!dev_data || !dev_data->use_vapic) {
3923		irte->hi.fields.vector = vector;
3924		irte->lo.fields_remap.destination = dest_apicid;
3925		irte->lo.fields_remap.guest_mode = 0;
3926		modify_irte_ga(devid, index, irte, NULL);
3927	}
3928}
3929
3930#define IRTE_ALLOCATED (~1U)
3931static void irte_set_allocated(struct irq_remap_table *table, int index)
3932{
3933	table->table[index] = IRTE_ALLOCATED;
3934}
3935
3936static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3937{
3938	struct irte_ga *ptr = (struct irte_ga *)table->table;
3939	struct irte_ga *irte = &ptr[index];
3940
3941	memset(&irte->lo.val, 0, sizeof(u64));
3942	memset(&irte->hi.val, 0, sizeof(u64));
3943	irte->hi.fields.vector = 0xff;
3944}
3945
3946static bool irte_is_allocated(struct irq_remap_table *table, int index)
3947{
3948	union irte *ptr = (union irte *)table->table;
3949	union irte *irte = &ptr[index];
3950
3951	return irte->val != 0;
3952}
3953
3954static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3955{
3956	struct irte_ga *ptr = (struct irte_ga *)table->table;
3957	struct irte_ga *irte = &ptr[index];
3958
3959	return irte->hi.fields.vector != 0;
3960}
3961
3962static void irte_clear_allocated(struct irq_remap_table *table, int index)
3963{
3964	table->table[index] = 0;
3965}
3966
3967static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3968{
3969	struct irte_ga *ptr = (struct irte_ga *)table->table;
3970	struct irte_ga *irte = &ptr[index];
3971
3972	memset(&irte->lo.val, 0, sizeof(u64));
3973	memset(&irte->hi.val, 0, sizeof(u64));
3974}
3975
3976static int get_devid(struct irq_alloc_info *info)
3977{
3978	int devid = -1;
3979
3980	switch (info->type) {
3981	case X86_IRQ_ALLOC_TYPE_IOAPIC:
3982		devid     = get_ioapic_devid(info->ioapic_id);
3983		break;
3984	case X86_IRQ_ALLOC_TYPE_HPET:
3985		devid     = get_hpet_devid(info->hpet_id);
3986		break;
3987	case X86_IRQ_ALLOC_TYPE_MSI:
3988	case X86_IRQ_ALLOC_TYPE_MSIX:
3989		devid = get_device_id(&info->msi_dev->dev);
3990		break;
3991	default:
3992		BUG_ON(1);
3993		break;
3994	}
3995
3996	return devid;
3997}
3998
static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
{
	struct amd_iommu *iommu;
	int devid;

	if (!info)
		return NULL;

	devid = get_devid(info);
	if (devid >= 0) {
		iommu = amd_iommu_rlookup_table[devid];
		if (iommu)
			return iommu->ir_domain;
	}

	return NULL;
}

static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
{
	struct amd_iommu *iommu;
	int devid;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		devid = get_device_id(&info->msi_dev->dev);
		if (devid < 0)
			return NULL;

		iommu = amd_iommu_rlookup_table[devid];
		if (iommu)
			return iommu->msi_domain;
		break;
	default:
		break;
	}

	return NULL;
}

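/*
 * Hooks through which the generic irq_remapping core drives the AMD IOMMU:
 * hardware preparation and (re-)enabling plus the irq_domain lookups
 * defined above.
 */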
struct irq_remap_ops amd_iommu_irq_ops = {
	.prepare		= amd_iommu_prepare,
	.enable			= amd_iommu_enable,
	.disable		= amd_iommu_disable,
	.reenable		= amd_iommu_reenable,
	.enable_faulting	= amd_iommu_enable_faulting,
	.get_ir_irq_domain	= get_ir_irq_domain,
	.get_irq_domain		= get_irq_domain,
};

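/*
 * Fill in the IRTE for a freshly allocated interrupt and, depending on the
 * interrupt type, also prepare the IO-APIC routing entry or the MSI message
 * that will refer to the remapping table slot.
 */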
static void irq_remapping_prepare_irte(struct amd_ir_data *data,
				       struct irq_cfg *irq_cfg,
				       struct irq_alloc_info *info,
				       int devid, int index, int sub_handle)
{
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct msi_msg *msg = &data->msi_entry;
	struct IO_APIC_route_entry *entry;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!iommu)
		return;

	data->irq_2_irte.devid = devid;
	data->irq_2_irte.index = index + sub_handle;
	iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
				 apic->irq_dest_mode, irq_cfg->vector,
				 irq_cfg->dest_apicid, devid);

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		/* Setup IOAPIC entry */
		entry = info->ioapic_entry;
		info->ioapic_entry = NULL;
		memset(entry, 0, sizeof(*entry));
		entry->vector        = index;
		entry->mask          = 0;
		entry->trigger       = info->ioapic_trigger;
		entry->polarity      = info->ioapic_polarity;
		/* Mask level triggered irqs. */
		if (info->ioapic_trigger)
			entry->mask = 1;
		break;

	case X86_IRQ_ALLOC_TYPE_HPET:
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo = MSI_ADDR_BASE_LO;
		msg->data = irte_info->index;
		break;

	default:
		BUG_ON(1);
		break;
	}
}

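/*
 * Two flavours of IRTE manipulation: irte_32_ops handles the legacy 32-bit
 * remapping table entries, irte_128_ops the 128-bit entries used when
 * guest virtual APIC (GA) mode is enabled.
 */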
struct amd_irte_ops irte_32_ops = {
	.prepare = irte_prepare,
	.activate = irte_activate,
	.deactivate = irte_deactivate,
	.set_affinity = irte_set_affinity,
	.set_allocated = irte_set_allocated,
	.is_allocated = irte_is_allocated,
	.clear_allocated = irte_clear_allocated,
};

struct amd_irte_ops irte_128_ops = {
	.prepare = irte_ga_prepare,
	.activate = irte_ga_activate,
	.deactivate = irte_ga_deactivate,
	.set_affinity = irte_ga_set_affinity,
	.set_allocated = irte_ga_set_allocated,
	.is_allocated = irte_ga_is_allocated,
	.clear_allocated = irte_ga_clear_allocated,
};

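/*
 * irq_domain allocation callback: reserve IRTE slots for @nr_irqs
 * interrupts starting at @virq, allocate the per-interrupt amd_ir_data and
 * attach the remapping-aware irq_chip.  On failure, everything allocated
 * so far is torn down again.
 */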
static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data;
	struct amd_ir_data *data = NULL;
	struct irq_cfg *cfg;
	int i, ret, devid;
	int index = -1;

	if (!info)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
		return -EINVAL;

	/*
	 * With IRQ remapping enabled, we don't need contiguous CPU vectors
	 * to support multiple MSI interrupts.
	 */
	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	devid = get_devid(info);
	if (devid < 0)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
		if (get_irq_table(devid, true))
			index = info->ioapic_pin;
		else
			ret = -ENOMEM;
	} else {
		index = alloc_irq_index(devid, nr_irqs);
	}
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		ret = index;
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		/* Don't dereference irq_data before the NULL check below */
		cfg = irq_data ? irqd_cfg(irq_data) : NULL;
		if (!irq_data || !cfg) {
			ret = -EINVAL;
			goto out_free_data;
		}

		ret = -ENOMEM;
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			goto out_free_data;

		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
		else
			data->entry = kzalloc(sizeof(struct irte_ga),
					      GFP_KERNEL);
		if (!data->entry) {
			kfree(data);
			goto out_free_data;
		}

		irq_data->hwirq = (devid << 16) + i;
		irq_data->chip_data = data;
		irq_data->chip = &amd_ir_chip;
		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}

	return 0;

out_free_data:
	for (i--; i >= 0; i--) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			/* Free the IRTE backing store along with the chip data */
			data = irq_data->chip_data;
			kfree(data->entry);
			kfree(data);
		}
	}
	for (i = 0; i < nr_irqs; i++)
		free_irte(devid, index + i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

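/*
 * irq_domain free callback: release the IRTE slots and the per-interrupt
 * data allocated in irq_remapping_alloc().
 */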
static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs)
{
	struct irq_2_irte *irte_info;
	struct irq_data *irq_data;
	struct amd_ir_data *data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irte_info = &data->irq_2_irte;
			free_irte(irte_info->devid, irte_info->index);
			kfree(data->entry);
			kfree(data);
		}
	}
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static void irq_remapping_activate(struct irq_domain *domain,
				   struct irq_data *irq_data)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];

	if (iommu)
		iommu->irte_ops->activate(data->entry, irte_info->devid,
					  irte_info->index);
}

static void irq_remapping_deactivate(struct irq_domain *domain,
				     struct irq_data *irq_data)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];

	if (iommu)
		iommu->irte_ops->deactivate(data->entry, irte_info->devid,
					    irte_info->index);
}

static struct irq_domain_ops amd_ir_domain_ops = {
	.alloc = irq_remapping_alloc,
	.free = irq_remapping_free,
	.activate = irq_remapping_activate,
	.deactivate = irq_remapping_deactivate,
};

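/*
 * Switch an interrupt between normal remapped delivery and guest virtual
 * APIC (posted interrupt) delivery.  Reached via irq_set_vcpu_affinity()
 * from hypervisor code (e.g. KVM's AVIC support), which passes a
 * struct amd_iommu_pi_data as @vcpu_info.
 */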
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
	struct amd_iommu *iommu;
	struct amd_iommu_pi_data *pi_data = vcpu_info;
	struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
	struct amd_ir_data *ir_data = data->chip_data;
	struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);

	/*
	 * Note:
	 * If this device has never been set up for guest mode, we must not
	 * modify the IRTE.
	 */
	if (!dev_data || !dev_data->use_vapic)
		return 0;

	pi_data->ir_data = ir_data;

	/*
	 * Note:
	 * SVM tries to set up VAPIC mode, but the IOMMU is running in legacy
	 * interrupt remapping mode, so force legacy mode instead.
	 */
	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
		pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
			 __func__);
		pi_data->is_guest_mode = false;
	}

	iommu = amd_iommu_rlookup_table[irte_info->devid];
	if (iommu == NULL)
		return -EINVAL;

	pi_data->prev_ga_tag = ir_data->cached_ga_tag;
	if (pi_data->is_guest_mode) {
		/* Setting up guest-mode (posted-interrupt) delivery */
		irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
		irte->hi.fields.vector = vcpu_pi_info->vector;
		irte->lo.fields_vapic.guest_mode = 1;
		irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;

		ir_data->cached_ga_tag = pi_data->ga_tag;
	} else {
		/* Un-setting: back to plain remapped delivery */
		struct irq_cfg *cfg = irqd_cfg(data);

		irte->hi.val = 0;
		irte->lo.val = 0;
		irte->hi.fields.vector = cfg->vector;
		irte->lo.fields_remap.guest_mode = 0;
		irte->lo.fields_remap.destination = cfg->dest_apicid;
		irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
		irte->lo.fields_remap.dm = apic->irq_dest_mode;

		/*
		 * This communicates the ga_tag back to the caller
		 * so that it can do all the necessary clean up.
		 */
		ir_data->cached_ga_tag = 0;
	}

	return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
}

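/*
 * Affinity change for a remapped interrupt: let the parent (vector) domain
 * pick the new CPU and vector first, then mirror the result into the IRTE.
 */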
static int amd_ir_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct amd_ir_data *ir_data = data->chip_data;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct irq_cfg *cfg = irqd_cfg(data);
	struct irq_data *parent = data->parent_data;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
	int ret;

	if (!iommu)
		return -ENODEV;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/*
	 * Atomically update the IRTE with the new destination and vector,
	 * and flush the interrupt entry cache.
	 */
	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
			    irte_info->index, cfg->vector, cfg->dest_apicid);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination, so it is time to clean up the previous
	 * vector allocation.
	 */
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
	struct amd_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

static struct irq_chip amd_ir_chip = {
	.irq_ack = ir_ack_apic_edge,
	.irq_set_affinity = amd_ir_set_affinity,
	.irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
	.irq_compose_msi_msg = ir_compose_msi_msg,
};

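/*
 * Create the per-IOMMU interrupt remapping irq_domain and the MSI domain
 * stacked on top of it; the remapping domain itself is parented to the
 * arch-provided vector domain.
 */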
int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
	iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu);
	if (!iommu->ir_domain)
		return -ENOMEM;

	iommu->ir_domain->parent = arch_get_ir_parent_domain();
	iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);

	return 0;
}

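/*
 * Update the guest virtual APIC fields of a posted-interrupt IRTE when a
 * vCPU is scheduled in or out: record whether it is running and, if so, on
 * which physical CPU, then flush the interrupt table cache.  Exported so
 * that hypervisor code (e.g. KVM's AVIC support) can call it.
 */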
int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct irq_remap_table *irt;
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	int devid = ir_data->irq_2_irte.devid;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -ENODEV;

	irt = get_irq_table(devid, false);
	if (!irt)
		return -ENODEV;

	spin_lock_irqsave(&irt->lock, flags);

	if (ref->lo.fields_vapic.guest_mode) {
		if (cpu >= 0)
			ref->lo.fields_vapic.destination = cpu;
		ref->lo.fields_vapic.is_run = is_run;
		barrier();
	}

	spin_unlock_irqrestore(&irt->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
	return 0;
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif