   1/*
   2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
   3 * Author: Joerg Roedel <jroedel@suse.de>
   4 *         Leo Duran <leo.duran@amd.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  18 */
  19
  20#include <linux/ratelimit.h>
  21#include <linux/pci.h>
  22#include <linux/pci-ats.h>
  23#include <linux/bitmap.h>
  24#include <linux/slab.h>
  25#include <linux/debugfs.h>
  26#include <linux/scatterlist.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/iommu-helper.h>
  29#include <linux/iommu.h>
  30#include <linux/delay.h>
  31#include <linux/amd-iommu.h>
  32#include <linux/notifier.h>
  33#include <linux/export.h>
  34#include <linux/irq.h>
  35#include <linux/msi.h>
  36#include <linux/dma-contiguous.h>
  37#include <linux/irqdomain.h>
  38#include <linux/percpu.h>
  39#include <asm/irq_remapping.h>
  40#include <asm/io_apic.h>
  41#include <asm/apic.h>
  42#include <asm/hw_irq.h>
  43#include <asm/msidef.h>
  44#include <asm/proto.h>
  45#include <asm/iommu.h>
  46#include <asm/gart.h>
  47#include <asm/dma.h>
  48
  49#include "amd_iommu_proto.h"
  50#include "amd_iommu_types.h"
  51#include "irq_remapping.h"
  52
  53#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
  54
  55#define LOOP_TIMEOUT	100000
  56
  57/*
   58 * This bitmap is used to advertise the page sizes our hardware supports
  59 * to the IOMMU core, which will then use this information to split
  60 * physically contiguous memory regions it is mapping into page sizes
  61 * that we support.
  62 *
  63 * 512GB Pages are not supported due to a hardware bug
  64 */
  65#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
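/*
 * Illustrative sketch (not part of the driver): bit n of the bitmap
 * advertises support for page size 1 << n.  ~0xFFFUL sets every bit
 * from 12 (4KB) upwards, and ~(2ULL << 38) clears bit 39 (512GB)
 * again.  A hypothetical query helper shows how the mask is read
 * (is_power_of_2() is from linux/log2.h):
 */
static inline bool amd_iommu_pgsize_supported(unsigned long pgsize)
{
	/* true for 4KB, 2MB, 1GB, ...; false for 512GB */
	return is_power_of_2(pgsize) && (AMD_IOMMU_PGSIZES & pgsize);
}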
  66
  67static DEFINE_RWLOCK(amd_iommu_devtable_lock);
  68
  69/* List of all available dev_data structures */
  70static LIST_HEAD(dev_data_list);
  71static DEFINE_SPINLOCK(dev_data_list_lock);
  72
  73LIST_HEAD(ioapic_map);
  74LIST_HEAD(hpet_map);
  75
  76/*
  77 * Domain for untranslated devices - only allocated
   78 * if iommu=pt is passed on the kernel cmd line.
  79 */
  80static const struct iommu_ops amd_iommu_ops;
  81
  82static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
  83int amd_iommu_max_glx_val = -1;
  84
  85static struct dma_map_ops amd_iommu_dma_ops;
  86
  87/*
  88 * This struct contains device specific data for the IOMMU
  89 */
  90struct iommu_dev_data {
  91	struct list_head list;		  /* For domain->dev_list */
  92	struct list_head dev_data_list;	  /* For global dev_data_list */
  93	struct protection_domain *domain; /* Domain the device is bound to */
  94	u16 devid;			  /* PCI Device ID */
  95	u16 alias;			  /* Alias Device ID */
  96	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
  97	bool passthrough;		  /* Device is identity mapped */
  98	struct {
  99		bool enabled;
 100		int qdep;
 101	} ats;				  /* ATS state */
 102	bool pri_tlp;			  /* PASID TLB required for
 103					     PPR completions */
 104	u32 errata;			  /* Bitmap for errata to apply */
 105};
 106
 107/*
  108 * General struct to manage commands sent to an IOMMU
 109 */
 110struct iommu_cmd {
 111	u32 data[4];
 112};
 113
 114struct kmem_cache *amd_iommu_irq_cache;
 115
 116static void update_domain(struct protection_domain *domain);
 117static int protection_domain_init(struct protection_domain *domain);
 118static void detach_device(struct device *dev);
 119
 120/*
 121 * For dynamic growth the aperture size is split into ranges of 128MB of
 122 * DMA address space each. This struct represents one such range.
 123 */
 124struct aperture_range {
 125
 126	spinlock_t bitmap_lock;
 127
 128	/* address allocation bitmap */
 129	unsigned long *bitmap;
 130	unsigned long offset;
 131	unsigned long next_bit;
 132
 133	/*
 134	 * Array of PTE pages for the aperture. In this array we save all the
 135	 * leaf pages of the domain page table used for the aperture. This way
 136	 * we don't need to walk the page table to find a specific PTE. We can
 137	 * just calculate its address in constant time.
 138	 */
 139	u64 *pte_pages[64];
 140};
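/*
 * Illustrative sketch (assumption, not driver code): with the leaf
 * pages cached in pte_pages[], the PTE slot for an aperture address
 * follows from two shift-and-mask index computations instead of a
 * page-table walk.  APERTURE_PAGE_INDEX() and PM_LEVEL_INDEX() come
 * from amd_iommu_types.h.
 */
static inline u64 *aperture_pte_slot(struct aperture_range *range,
				     unsigned long address)
{
	u64 *pte = range->pte_pages[APERTURE_PAGE_INDEX(address)];

	return pte ? &pte[PM_LEVEL_INDEX(0, address)] : NULL;
}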
 141
 142/*
 143 * Data container for a dma_ops specific protection domain
 144 */
 145struct dma_ops_domain {
 146	/* generic protection domain information */
 147	struct protection_domain domain;
 148
 149	/* size of the aperture for the mappings */
 150	unsigned long aperture_size;
 151
 152	/* aperture index we start searching for free addresses */
 153	u32 __percpu *next_index;
 154
 155	/* address space relevant data */
 156	struct aperture_range *aperture[APERTURE_MAX_RANGES];
 157};
 158
 159/****************************************************************************
 160 *
 161 * Helper functions
 162 *
 163 ****************************************************************************/
 164
 165static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 166{
 167	return container_of(dom, struct protection_domain, domain);
 168}
 169
 170static inline u16 get_device_id(struct device *dev)
 171{
 172	struct pci_dev *pdev = to_pci_dev(dev);
 173
 174	return PCI_DEVID(pdev->bus->number, pdev->devfn);
 175}
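/*
 * Example (illustrative): a device at 01:00.2 has bus number 0x01 and
 * devfn 0x02, so PCI_DEVID() yields devid 0x0102 - the 16-bit ID
 * format used to index the device and rlookup tables below.
 */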
 176
 177static struct iommu_dev_data *alloc_dev_data(u16 devid)
 178{
 179	struct iommu_dev_data *dev_data;
 180	unsigned long flags;
 181
 182	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
 183	if (!dev_data)
 184		return NULL;
 185
 186	dev_data->devid = devid;
 187
 188	spin_lock_irqsave(&dev_data_list_lock, flags);
 189	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
 190	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 191
 192	return dev_data;
 193}
 194
 195static struct iommu_dev_data *search_dev_data(u16 devid)
 196{
 197	struct iommu_dev_data *dev_data;
 198	unsigned long flags;
 199
 200	spin_lock_irqsave(&dev_data_list_lock, flags);
 201	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
 202		if (dev_data->devid == devid)
 203			goto out_unlock;
 204	}
 205
 206	dev_data = NULL;
 207
 208out_unlock:
 209	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 210
 211	return dev_data;
 212}
 213
 214static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
 215{
 216	*(u16 *)data = alias;
 217	return 0;
 218}
 219
 220static u16 get_alias(struct device *dev)
 221{
 222	struct pci_dev *pdev = to_pci_dev(dev);
 223	u16 devid, ivrs_alias, pci_alias;
 224
 225	devid = get_device_id(dev);
 226	ivrs_alias = amd_iommu_alias_table[devid];
 227	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
 228
 229	if (ivrs_alias == pci_alias)
 230		return ivrs_alias;
 231
 232	/*
 233	 * DMA alias showdown
 234	 *
 235	 * The IVRS is fairly reliable in telling us about aliases, but it
 236	 * can't know about every screwy device.  If we don't have an IVRS
 237	 * reported alias, use the PCI reported alias.  In that case we may
 238	 * still need to initialize the rlookup and dev_table entries if the
 239	 * alias is to a non-existent device.
 240	 */
 241	if (ivrs_alias == devid) {
 242		if (!amd_iommu_rlookup_table[pci_alias]) {
 243			amd_iommu_rlookup_table[pci_alias] =
 244				amd_iommu_rlookup_table[devid];
 245			memcpy(amd_iommu_dev_table[pci_alias].data,
 246			       amd_iommu_dev_table[devid].data,
 247			       sizeof(amd_iommu_dev_table[pci_alias].data));
 248		}
 249
 250		return pci_alias;
 251	}
 252
 253	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
 254		"for device %s[%04x:%04x], kernel reported alias "
 255		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
 256		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
 257		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
 258		PCI_FUNC(pci_alias));
 259
 260	/*
 261	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
 262	 * bus, then the IVRS table may know about a quirk that we don't.
 263	 */
 264	if (pci_alias == devid &&
 265	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
 266		pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
 267		pdev->dma_alias_devfn = ivrs_alias & 0xff;
 268		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
 269			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
 270			dev_name(dev));
 271	}
 272
 273	return ivrs_alias;
 274}
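/*
 * Example (illustrative): for a conventional-PCI device behind a
 * PCIe-to-PCI bridge at 00:1e.0, the PCI core reports the bridge
 * (devid 0x00f0) as the DMA alias.  Only when this differs from the
 * IVRS-reported alias does the quirk handling above kick in.
 */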
 275
 276static struct iommu_dev_data *find_dev_data(u16 devid)
 277{
 278	struct iommu_dev_data *dev_data;
 279
 280	dev_data = search_dev_data(devid);
 281
 282	if (dev_data == NULL)
 283		dev_data = alloc_dev_data(devid);
 284
 285	return dev_data;
 286}
 287
 288static struct iommu_dev_data *get_dev_data(struct device *dev)
 289{
 290	return dev->archdata.iommu;
 291}
 292
 293static bool pci_iommuv2_capable(struct pci_dev *pdev)
 294{
 295	static const int caps[] = {
 296		PCI_EXT_CAP_ID_ATS,
 297		PCI_EXT_CAP_ID_PRI,
 298		PCI_EXT_CAP_ID_PASID,
 299	};
 300	int i, pos;
 301
 302	for (i = 0; i < 3; ++i) {
 303		pos = pci_find_ext_capability(pdev, caps[i]);
 304		if (pos == 0)
 305			return false;
 306	}
 307
 308	return true;
 309}
 310
 311static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
 312{
 313	struct iommu_dev_data *dev_data;
 314
 315	dev_data = get_dev_data(&pdev->dev);
 316
 317	return dev_data->errata & (1 << erratum) ? true : false;
 318}
 319
 320/*
 321 * This function actually applies the mapping to the page table of the
 322 * dma_ops domain.
 323 */
 324static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
 325				struct unity_map_entry *e)
 326{
 327	u64 addr;
 328
 329	for (addr = e->address_start; addr < e->address_end;
 330	     addr += PAGE_SIZE) {
 331		if (addr < dma_dom->aperture_size)
 332			__set_bit(addr >> PAGE_SHIFT,
 333				  dma_dom->aperture[0]->bitmap);
 334	}
 335}
 336
 337/*
 338 * Inits the unity mappings required for a specific device
 339 */
 340static void init_unity_mappings_for_device(struct device *dev,
 341					   struct dma_ops_domain *dma_dom)
 342{
 343	struct unity_map_entry *e;
 344	u16 devid;
 345
 346	devid = get_device_id(dev);
 347
 348	list_for_each_entry(e, &amd_iommu_unity_map, list) {
 349		if (!(devid >= e->devid_start && devid <= e->devid_end))
 350			continue;
 351		alloc_unity_mapping(dma_dom, e);
 352	}
 353}
 354
 355/*
 356 * This function checks if the driver got a valid device from the caller to
 357 * avoid dereferencing invalid pointers.
 358 */
 359static bool check_device(struct device *dev)
 360{
 361	u16 devid;
 362
 363	if (!dev || !dev->dma_mask)
 364		return false;
 365
 366	/* No PCI device */
 367	if (!dev_is_pci(dev))
 368		return false;
 369
 370	devid = get_device_id(dev);
 371
 372	/* Out of our scope? */
 373	if (devid > amd_iommu_last_bdf)
 374		return false;
 375
 376	if (amd_iommu_rlookup_table[devid] == NULL)
 377		return false;
 378
 379	return true;
 380}
 381
 382static void init_iommu_group(struct device *dev)
 383{
 384	struct dma_ops_domain *dma_domain;
 385	struct iommu_domain *domain;
 386	struct iommu_group *group;
 387
 388	group = iommu_group_get_for_dev(dev);
 389	if (IS_ERR(group))
 390		return;
 391
 392	domain = iommu_group_default_domain(group);
 393	if (!domain)
 394		goto out;
 395
 396	dma_domain = to_pdomain(domain)->priv;
 397
 398	init_unity_mappings_for_device(dev, dma_domain);
 399out:
 400	iommu_group_put(group);
 401}
 402
 403static int iommu_init_device(struct device *dev)
 404{
 405	struct pci_dev *pdev = to_pci_dev(dev);
 406	struct iommu_dev_data *dev_data;
 407
 408	if (dev->archdata.iommu)
 409		return 0;
 410
 411	dev_data = find_dev_data(get_device_id(dev));
 412	if (!dev_data)
 413		return -ENOMEM;
 414
 415	dev_data->alias = get_alias(dev);
 416
 417	if (pci_iommuv2_capable(pdev)) {
 418		struct amd_iommu *iommu;
 419
 420		iommu              = amd_iommu_rlookup_table[dev_data->devid];
 421		dev_data->iommu_v2 = iommu->is_iommu_v2;
 422	}
 423
 424	dev->archdata.iommu = dev_data;
 425
 426	iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
 427			  dev);
 428
 429	return 0;
 430}
 431
 432static void iommu_ignore_device(struct device *dev)
 433{
 434	u16 devid, alias;
 435
 436	devid = get_device_id(dev);
 437	alias = get_alias(dev);
 438
 439	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 440	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
 441
 442	amd_iommu_rlookup_table[devid] = NULL;
 443	amd_iommu_rlookup_table[alias] = NULL;
 444}
 445
 446static void iommu_uninit_device(struct device *dev)
 447{
 448	struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));
 449
 450	if (!dev_data)
 451		return;
 452
 453	if (dev_data->domain)
 454		detach_device(dev);
 455
 456	iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
 457			    dev);
 458
 459	iommu_group_remove_device(dev);
 460
 461	/* Remove dma-ops */
 462	dev->archdata.dma_ops = NULL;
 463
 464	/*
 465	 * We keep dev_data around for unplugged devices and reuse it when the
 466	 * device is re-plugged - not doing so would introduce a ton of races.
 467	 */
 468}
 469
 470#ifdef CONFIG_AMD_IOMMU_STATS
 471
 472/*
 473 * Initialization code for statistics collection
 474 */
 475
 476DECLARE_STATS_COUNTER(compl_wait);
 477DECLARE_STATS_COUNTER(cnt_map_single);
 478DECLARE_STATS_COUNTER(cnt_unmap_single);
 479DECLARE_STATS_COUNTER(cnt_map_sg);
 480DECLARE_STATS_COUNTER(cnt_unmap_sg);
 481DECLARE_STATS_COUNTER(cnt_alloc_coherent);
 482DECLARE_STATS_COUNTER(cnt_free_coherent);
 483DECLARE_STATS_COUNTER(cross_page);
 484DECLARE_STATS_COUNTER(domain_flush_single);
 485DECLARE_STATS_COUNTER(domain_flush_all);
 486DECLARE_STATS_COUNTER(alloced_io_mem);
 487DECLARE_STATS_COUNTER(total_map_requests);
 488DECLARE_STATS_COUNTER(complete_ppr);
 489DECLARE_STATS_COUNTER(invalidate_iotlb);
 490DECLARE_STATS_COUNTER(invalidate_iotlb_all);
 491DECLARE_STATS_COUNTER(pri_requests);
 492
 493static struct dentry *stats_dir;
 494static struct dentry *de_fflush;
 495
 496static void amd_iommu_stats_add(struct __iommu_counter *cnt)
 497{
 498	if (stats_dir == NULL)
 499		return;
 500
 501	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
 502				       &cnt->value);
 503}
 504
 505static void amd_iommu_stats_init(void)
 506{
 507	stats_dir = debugfs_create_dir("amd-iommu", NULL);
 508	if (stats_dir == NULL)
 509		return;
 510
 511	de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
 512					 &amd_iommu_unmap_flush);
 513
 514	amd_iommu_stats_add(&compl_wait);
 515	amd_iommu_stats_add(&cnt_map_single);
 516	amd_iommu_stats_add(&cnt_unmap_single);
 517	amd_iommu_stats_add(&cnt_map_sg);
 518	amd_iommu_stats_add(&cnt_unmap_sg);
 519	amd_iommu_stats_add(&cnt_alloc_coherent);
 520	amd_iommu_stats_add(&cnt_free_coherent);
 521	amd_iommu_stats_add(&cross_page);
 522	amd_iommu_stats_add(&domain_flush_single);
 523	amd_iommu_stats_add(&domain_flush_all);
 524	amd_iommu_stats_add(&alloced_io_mem);
 525	amd_iommu_stats_add(&total_map_requests);
 526	amd_iommu_stats_add(&complete_ppr);
 527	amd_iommu_stats_add(&invalidate_iotlb);
 528	amd_iommu_stats_add(&invalidate_iotlb_all);
 529	amd_iommu_stats_add(&pri_requests);
 530}
 531
 532#endif
 533
 534/****************************************************************************
 535 *
 536 * Interrupt handling functions
 537 *
 538 ****************************************************************************/
 539
 540static void dump_dte_entry(u16 devid)
 541{
 542	int i;
 543
 544	for (i = 0; i < 4; ++i)
 545		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
 546			amd_iommu_dev_table[devid].data[i]);
 547}
 548
 549static void dump_command(unsigned long phys_addr)
 550{
 551	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
 552	int i;
 553
 554	for (i = 0; i < 4; ++i)
 555		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
 556}
 557
 558static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 559{
 560	int type, devid, domid, flags;
 561	volatile u32 *event = __evt;
 562	int count = 0;
 563	u64 address;
 564
 565retry:
 566	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
 567	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 568	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
 569	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 570	address = (u64)(((u64)event[3]) << 32) | event[2];
 571
 572	if (type == 0) {
 573		/* Did we hit the erratum? */
 574		if (++count == LOOP_TIMEOUT) {
 575			pr_err("AMD-Vi: No event written to event log\n");
 576			return;
 577		}
 578		udelay(1);
 579		goto retry;
 580	}
 581
 582	printk(KERN_ERR "AMD-Vi: Event logged [");
 583
 584	switch (type) {
 585	case EVENT_TYPE_ILL_DEV:
 586		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
 587		       "address=0x%016llx flags=0x%04x]\n",
 588		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 589		       address, flags);
 590		dump_dte_entry(devid);
 591		break;
 592	case EVENT_TYPE_IO_FAULT:
 593		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
 594		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 595		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 596		       domid, address, flags);
 597		break;
 598	case EVENT_TYPE_DEV_TAB_ERR:
 599		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 600		       "address=0x%016llx flags=0x%04x]\n",
 601		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 602		       address, flags);
 603		break;
 604	case EVENT_TYPE_PAGE_TAB_ERR:
 605		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 606		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 607		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 608		       domid, address, flags);
 609		break;
 610	case EVENT_TYPE_ILL_CMD:
 611		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
 612		dump_command(address);
 613		break;
 614	case EVENT_TYPE_CMD_HARD_ERR:
 615		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
 616		       "flags=0x%04x]\n", address, flags);
 617		break;
 618	case EVENT_TYPE_IOTLB_INV_TO:
 619		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
 620		       "address=0x%016llx]\n",
 621		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 622		       address);
 623		break;
 624	case EVENT_TYPE_INV_DEV_REQ:
 625		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
 626		       "address=0x%016llx flags=0x%04x]\n",
 627		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 628		       address, flags);
 629		break;
 630	default:
 631		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
 632	}
 633
 634	memset(__evt, 0, 4 * sizeof(u32));
 635}
 636
 637static void iommu_poll_events(struct amd_iommu *iommu)
 638{
 639	u32 head, tail;
 640
 641	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 642	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 643
 644	while (head != tail) {
 645		iommu_print_event(iommu, iommu->evt_buf + head);
 646		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
 647	}
 648
 649	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 650}
 651
 652static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 653{
 654	struct amd_iommu_fault fault;
 655
 656	INC_STATS_COUNTER(pri_requests);
 657
 658	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 659		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 660		return;
 661	}
 662
 663	fault.address   = raw[1];
 664	fault.pasid     = PPR_PASID(raw[0]);
 665	fault.device_id = PPR_DEVID(raw[0]);
 666	fault.tag       = PPR_TAG(raw[0]);
 667	fault.flags     = PPR_FLAGS(raw[0]);
 668
 669	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 670}
 671
 672static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 673{
 674	u32 head, tail;
 675
 676	if (iommu->ppr_log == NULL)
 677		return;
 678
 679	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 680	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 681
 682	while (head != tail) {
 683		volatile u64 *raw;
 684		u64 entry[2];
 685		int i;
 686
 687		raw = (u64 *)(iommu->ppr_log + head);
 688
 689		/*
 690		 * Hardware bug: Interrupt may arrive before the entry is
 691		 * written to memory. If this happens we need to wait for the
 692		 * entry to arrive.
 693		 */
 694		for (i = 0; i < LOOP_TIMEOUT; ++i) {
 695			if (PPR_REQ_TYPE(raw[0]) != 0)
 696				break;
 697			udelay(1);
 698		}
 699
 700		/* Avoid memcpy function-call overhead */
 701		entry[0] = raw[0];
 702		entry[1] = raw[1];
 703
 704		/*
 705		 * To detect the hardware bug we need to clear the entry
 706		 * back to zero.
 707		 */
 708		raw[0] = raw[1] = 0UL;
 709
 710		/* Update head pointer of hardware ring-buffer */
 711		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 712		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 713
 714		/* Handle PPR entry */
 715		iommu_handle_ppr_entry(iommu, entry);
 716
 717		/* Refresh ring-buffer information */
 718		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 719		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 720	}
 721}
 722
 723irqreturn_t amd_iommu_int_thread(int irq, void *data)
 724{
 725	struct amd_iommu *iommu = (struct amd_iommu *) data;
 726	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 727
 728	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
 729		/* Enable EVT and PPR interrupts again */
 730		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
 731			iommu->mmio_base + MMIO_STATUS_OFFSET);
 732
 733		if (status & MMIO_STATUS_EVT_INT_MASK) {
 734			pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
 735			iommu_poll_events(iommu);
 736		}
 737
 738		if (status & MMIO_STATUS_PPR_INT_MASK) {
 739			pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
 740			iommu_poll_ppr_log(iommu);
 741		}
 742
 743		/*
 744		 * Hardware bug: ERBT1312
 745		 * When re-enabling interrupt (by writing 1
 746		 * to clear the bit), the hardware might also try to set
 747		 * the interrupt bit in the event status register.
  748		 * In this scenario, the bit will be set, disabling
 749		 * subsequent interrupts.
 750		 *
 751		 * Workaround: The IOMMU driver should read back the
 752		 * status register and check if the interrupt bits are cleared.
  753		 * If not, the driver will need to go through the interrupt
  754		 * handler again and re-clear the bits.
 755		 */
 756		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 757	}
 758	return IRQ_HANDLED;
 759}
 760
 761irqreturn_t amd_iommu_int_handler(int irq, void *data)
 762{
 763	return IRQ_WAKE_THREAD;
 764}
 765
 766/****************************************************************************
 767 *
 768 * IOMMU command queuing functions
 769 *
 770 ****************************************************************************/
 771
 772static int wait_on_sem(volatile u64 *sem)
 773{
 774	int i = 0;
 775
 776	while (*sem == 0 && i < LOOP_TIMEOUT) {
 777		udelay(1);
 778		i += 1;
 779	}
 780
 781	if (i == LOOP_TIMEOUT) {
 782		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
 783		return -EIO;
 784	}
 785
 786	return 0;
 787}
 788
 789static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 790			       struct iommu_cmd *cmd,
 791			       u32 tail)
 792{
 793	u8 *target;
 794
 795	target = iommu->cmd_buf + tail;
 796	tail   = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
 797
 798	/* Copy command to buffer */
 799	memcpy(target, cmd, sizeof(*cmd));
 800
 801	/* Tell the IOMMU about it */
 802	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 803}
 804
 805static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 806{
 807	WARN_ON(address & 0x7ULL);
 808
 809	memset(cmd, 0, sizeof(*cmd));
 810	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
 811	cmd->data[1] = upper_32_bits(__pa(address));
 812	cmd->data[2] = 1;
 813	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 814}
 815
 816static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 817{
 818	memset(cmd, 0, sizeof(*cmd));
 819	cmd->data[0] = devid;
 820	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 821}
 822
 823static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 824				  size_t size, u16 domid, int pde)
 825{
 826	u64 pages;
 827	bool s;
 828
 829	pages = iommu_num_pages(address, size, PAGE_SIZE);
 830	s     = false;
 831
 832	if (pages > 1) {
 833		/*
 834		 * If we have to flush more than one page, flush all
 835		 * TLB entries for this domain
 836		 */
 837		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 838		s = true;
 839	}
 840
 841	address &= PAGE_MASK;
 842
 843	memset(cmd, 0, sizeof(*cmd));
 844	cmd->data[1] |= domid;
 845	cmd->data[2]  = lower_32_bits(address);
 846	cmd->data[3]  = upper_32_bits(address);
 847	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 848	if (s) /* size bit - we flush more than one 4kb page */
 849		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 850	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
 851		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 852}
 853
 854static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 855				  u64 address, size_t size)
 856{
 857	u64 pages;
 858	bool s;
 859
 860	pages = iommu_num_pages(address, size, PAGE_SIZE);
 861	s     = false;
 862
 863	if (pages > 1) {
 864		/*
 865		 * If we have to flush more than one page, flush all
 866		 * TLB entries for this domain
 867		 */
 868		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 869		s = true;
 870	}
 871
 872	address &= PAGE_MASK;
 873
 874	memset(cmd, 0, sizeof(*cmd));
 875	cmd->data[0]  = devid;
 876	cmd->data[0] |= (qdep & 0xff) << 24;
 877	cmd->data[1]  = devid;
 878	cmd->data[2]  = lower_32_bits(address);
 879	cmd->data[3]  = upper_32_bits(address);
 880	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 881	if (s)
 882		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 883}
 884
 885static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
 886				  u64 address, bool size)
 887{
 888	memset(cmd, 0, sizeof(*cmd));
 889
 890	address &= ~(0xfffULL);
 891
 892	cmd->data[0]  = pasid;
 893	cmd->data[1]  = domid;
 894	cmd->data[2]  = lower_32_bits(address);
 895	cmd->data[3]  = upper_32_bits(address);
 896	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 897	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 898	if (size)
 899		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 900	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 901}
 902
 903static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
 904				  int qdep, u64 address, bool size)
 905{
 906	memset(cmd, 0, sizeof(*cmd));
 907
 908	address &= ~(0xfffULL);
 909
 910	cmd->data[0]  = devid;
 911	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
 912	cmd->data[0] |= (qdep  & 0xff) << 24;
 913	cmd->data[1]  = devid;
 914	cmd->data[1] |= (pasid & 0xff) << 16;
 915	cmd->data[2]  = lower_32_bits(address);
 916	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 917	cmd->data[3]  = upper_32_bits(address);
 918	if (size)
 919		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 920	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 921}
 922
 923static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
 924			       int status, int tag, bool gn)
 925{
 926	memset(cmd, 0, sizeof(*cmd));
 927
 928	cmd->data[0]  = devid;
 929	if (gn) {
 930		cmd->data[1]  = pasid;
 931		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
 932	}
 933	cmd->data[3]  = tag & 0x1ff;
 934	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
 935
 936	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
 937}
 938
 939static void build_inv_all(struct iommu_cmd *cmd)
 940{
 941	memset(cmd, 0, sizeof(*cmd));
 942	CMD_SET_TYPE(cmd, CMD_INV_ALL);
 943}
 944
 945static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
 946{
 947	memset(cmd, 0, sizeof(*cmd));
 948	cmd->data[0] = devid;
 949	CMD_SET_TYPE(cmd, CMD_INV_IRT);
 950}
 951
 952/*
 953 * Writes the command to the IOMMUs command buffer and informs the
 954 * hardware about the new command.
 955 */
 956static int iommu_queue_command_sync(struct amd_iommu *iommu,
 957				    struct iommu_cmd *cmd,
 958				    bool sync)
 959{
 960	u32 left, tail, head, next_tail;
 961	unsigned long flags;
 962
 963again:
 964	spin_lock_irqsave(&iommu->lock, flags);
 965
 966	head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 967	tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 968	next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
 969	left      = (head - next_tail) % CMD_BUFFER_SIZE;
 970
 971	if (left <= 2) {
 972		struct iommu_cmd sync_cmd;
 973		volatile u64 sem = 0;
 974		int ret;
 975
 976		build_completion_wait(&sync_cmd, (u64)&sem);
 977		copy_cmd_to_buffer(iommu, &sync_cmd, tail);
 978
 979		spin_unlock_irqrestore(&iommu->lock, flags);
 980
 981		if ((ret = wait_on_sem(&sem)) != 0)
 982			return ret;
 983
 984		goto again;
 985	}
 986
 987	copy_cmd_to_buffer(iommu, cmd, tail);
 988
 989	/* We need to sync now to make sure all commands are processed */
 990	iommu->need_sync = sync;
 991
 992	spin_unlock_irqrestore(&iommu->lock, flags);
 993
 994	return 0;
 995}
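/*
 * Worked example (illustrative): commands are 16 bytes.  With
 * head == 0x1000 and tail == 0xff0, next_tail becomes 0x1000 and
 * left == (0x1000 - 0x1000) % CMD_BUFFER_SIZE == 0, so the ring is
 * treated as full: a completion-wait is queued in the last free slot
 * and the queue attempt is retried after the hardware drains.
 */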
 996
 997static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 998{
 999	return iommu_queue_command_sync(iommu, cmd, true);
1000}
1001
1002/*
1003 * This function queues a completion wait command into the command
1004 * buffer of an IOMMU
1005 */
1006static int iommu_completion_wait(struct amd_iommu *iommu)
1007{
1008	struct iommu_cmd cmd;
1009	volatile u64 sem = 0;
1010	int ret;
1011
1012	if (!iommu->need_sync)
1013		return 0;
1014
1015	build_completion_wait(&cmd, (u64)&sem);
1016
1017	ret = iommu_queue_command_sync(iommu, &cmd, false);
1018	if (ret)
1019		return ret;
1020
1021	return wait_on_sem(&sem);
1022}
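/*
 * Usage sketch: making an invalidation visible follows a two-step
 * pattern - queue the command, then wait on the completion-wait
 * semaphore - e.g. as iommu_flush_dte_all() below does per device:
 *
 *	iommu_flush_dte(iommu, devid);
 *	iommu_completion_wait(iommu);
 */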
1023
1024static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1025{
1026	struct iommu_cmd cmd;
1027
1028	build_inv_dte(&cmd, devid);
1029
1030	return iommu_queue_command(iommu, &cmd);
1031}
1032
1033static void iommu_flush_dte_all(struct amd_iommu *iommu)
1034{
1035	u32 devid;
1036
1037	for (devid = 0; devid <= 0xffff; ++devid)
1038		iommu_flush_dte(iommu, devid);
1039
1040	iommu_completion_wait(iommu);
1041}
1042
1043/*
1044 * This function uses heavy locking and may disable irqs for some time. But
1045 * this is no issue because it is only called during resume.
1046 */
1047static void iommu_flush_tlb_all(struct amd_iommu *iommu)
1048{
1049	u32 dom_id;
1050
1051	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
1052		struct iommu_cmd cmd;
1053		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1054				      dom_id, 1);
1055		iommu_queue_command(iommu, &cmd);
1056	}
1057
1058	iommu_completion_wait(iommu);
1059}
1060
1061static void iommu_flush_all(struct amd_iommu *iommu)
1062{
1063	struct iommu_cmd cmd;
1064
1065	build_inv_all(&cmd);
1066
1067	iommu_queue_command(iommu, &cmd);
1068	iommu_completion_wait(iommu);
1069}
1070
1071static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1072{
1073	struct iommu_cmd cmd;
1074
1075	build_inv_irt(&cmd, devid);
1076
1077	iommu_queue_command(iommu, &cmd);
1078}
1079
1080static void iommu_flush_irt_all(struct amd_iommu *iommu)
1081{
1082	u32 devid;
1083
1084	for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
1085		iommu_flush_irt(iommu, devid);
1086
1087	iommu_completion_wait(iommu);
1088}
1089
1090void iommu_flush_all_caches(struct amd_iommu *iommu)
1091{
1092	if (iommu_feature(iommu, FEATURE_IA)) {
1093		iommu_flush_all(iommu);
1094	} else {
1095		iommu_flush_dte_all(iommu);
1096		iommu_flush_irt_all(iommu);
1097		iommu_flush_tlb_all(iommu);
1098	}
1099}
1100
1101/*
1102 * Command send function for flushing on-device TLB
1103 */
1104static int device_flush_iotlb(struct iommu_dev_data *dev_data,
1105			      u64 address, size_t size)
1106{
1107	struct amd_iommu *iommu;
1108	struct iommu_cmd cmd;
1109	int qdep;
1110
1111	qdep     = dev_data->ats.qdep;
1112	iommu    = amd_iommu_rlookup_table[dev_data->devid];
1113
1114	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
1115
1116	return iommu_queue_command(iommu, &cmd);
1117}
1118
1119/*
1120 * Command send function for invalidating a device table entry
1121 */
1122static int device_flush_dte(struct iommu_dev_data *dev_data)
1123{
1124	struct amd_iommu *iommu;
1125	u16 alias;
1126	int ret;
1127
1128	iommu = amd_iommu_rlookup_table[dev_data->devid];
1129	alias = dev_data->alias;
1130
1131	ret = iommu_flush_dte(iommu, dev_data->devid);
1132	if (!ret && alias != dev_data->devid)
1133		ret = iommu_flush_dte(iommu, alias);
1134	if (ret)
1135		return ret;
1136
1137	if (dev_data->ats.enabled)
1138		ret = device_flush_iotlb(dev_data, 0, ~0UL);
1139
1140	return ret;
1141}
1142
1143/*
1144 * TLB invalidation function which is called from the mapping functions.
1145 * It invalidates a single PTE if the range to flush is within a single
1146 * page. Otherwise it flushes the whole TLB of the IOMMU.
1147 */
1148static void __domain_flush_pages(struct protection_domain *domain,
1149				 u64 address, size_t size, int pde)
1150{
1151	struct iommu_dev_data *dev_data;
1152	struct iommu_cmd cmd;
1153	int ret = 0, i;
1154
1155	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1156
1157	for (i = 0; i < amd_iommus_present; ++i) {
1158		if (!domain->dev_iommu[i])
1159			continue;
1160
1161		/*
1162		 * Devices of this domain are behind this IOMMU
1163		 * We need a TLB flush
1164		 */
1165		ret |= iommu_queue_command(amd_iommus[i], &cmd);
1166	}
1167
1168	list_for_each_entry(dev_data, &domain->dev_list, list) {
1169
1170		if (!dev_data->ats.enabled)
1171			continue;
1172
1173		ret |= device_flush_iotlb(dev_data, address, size);
1174	}
1175
1176	WARN_ON(ret);
1177}
1178
1179static void domain_flush_pages(struct protection_domain *domain,
1180			       u64 address, size_t size)
1181{
1182	__domain_flush_pages(domain, address, size, 0);
1183}
1184
1185/* Flush the whole IO/TLB for a given protection domain */
1186static void domain_flush_tlb(struct protection_domain *domain)
1187{
1188	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
1189}
1190
1191/* Flush the whole IO/TLB for a given protection domain - including PDE */
1192static void domain_flush_tlb_pde(struct protection_domain *domain)
1193{
1194	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1195}
1196
1197static void domain_flush_complete(struct protection_domain *domain)
1198{
1199	int i;
1200
1201	for (i = 0; i < amd_iommus_present; ++i) {
1202		if (!domain->dev_iommu[i])
1203			continue;
1204
1205		/*
1206		 * Devices of this domain are behind this IOMMU
1207		 * We need to wait for completion of all commands.
1208		 */
1209		iommu_completion_wait(amd_iommus[i]);
1210	}
1211}
1212
1213
1214/*
 1215 * This function flushes the DTEs for all devices in the domain
1216 */
1217static void domain_flush_devices(struct protection_domain *domain)
1218{
1219	struct iommu_dev_data *dev_data;
1220
1221	list_for_each_entry(dev_data, &domain->dev_list, list)
1222		device_flush_dte(dev_data);
1223}
1224
1225/****************************************************************************
1226 *
 1227 * The functions below are used to create the page table mappings for
1228 * unity mapped regions.
1229 *
1230 ****************************************************************************/
1231
1232/*
1233 * This function is used to add another level to an IO page table. Adding
1234 * another level increases the size of the address space by 9 bits to a size up
1235 * to 64 bits.
1236 */
1237static bool increase_address_space(struct protection_domain *domain,
1238				   gfp_t gfp)
1239{
1240	u64 *pte;
1241
1242	if (domain->mode == PAGE_MODE_6_LEVEL)
 1243		/* address space is already 64 bits large */
1244		return false;
1245
1246	pte = (void *)get_zeroed_page(gfp);
1247	if (!pte)
1248		return false;
1249
1250	*pte             = PM_LEVEL_PDE(domain->mode,
1251					virt_to_phys(domain->pt_root));
1252	domain->pt_root  = pte;
1253	domain->mode    += 1;
1254	domain->updated  = true;
1255
1256	return true;
1257}
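/*
 * Worked example (derived from PM_LEVEL_SIZE() in amd_iommu_types.h):
 * a 3-level table resolves 12 + 3 * 9 = 39 bits of IOVA space
 * (512GB); growing to 4 levels extends that to 48 bits, and the
 * 6-level maximum covers the full 64-bit space.
 */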
1258
1259static u64 *alloc_pte(struct protection_domain *domain,
1260		      unsigned long address,
1261		      unsigned long page_size,
1262		      u64 **pte_page,
1263		      gfp_t gfp)
1264{
1265	int level, end_lvl;
1266	u64 *pte, *page;
1267
1268	BUG_ON(!is_power_of_2(page_size));
1269
1270	while (address > PM_LEVEL_SIZE(domain->mode))
1271		increase_address_space(domain, gfp);
1272
1273	level   = domain->mode - 1;
1274	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1275	address = PAGE_SIZE_ALIGN(address, page_size);
1276	end_lvl = PAGE_SIZE_LEVEL(page_size);
1277
1278	while (level > end_lvl) {
1279		u64 __pte, __npte;
1280
1281		__pte = *pte;
1282
1283		if (!IOMMU_PTE_PRESENT(__pte)) {
1284			page = (u64 *)get_zeroed_page(gfp);
1285			if (!page)
1286				return NULL;
1287
1288			__npte = PM_LEVEL_PDE(level, virt_to_phys(page));
1289
1290			if (cmpxchg64(pte, __pte, __npte)) {
1291				free_page((unsigned long)page);
1292				continue;
1293			}
1294		}
1295
1296		/* No level skipping support yet */
1297		if (PM_PTE_LEVEL(*pte) != level)
1298			return NULL;
1299
1300		level -= 1;
1301
1302		pte = IOMMU_PTE_PAGE(*pte);
1303
1304		if (pte_page && level == end_lvl)
1305			*pte_page = pte;
1306
1307		pte = &pte[PM_LEVEL_INDEX(level, address)];
1308	}
1309
1310	return pte;
1311}
1312
1313/*
1314 * This function checks if there is a PTE for a given dma address. If
1315 * there is one, it returns the pointer to it.
1316 */
1317static u64 *fetch_pte(struct protection_domain *domain,
1318		      unsigned long address,
1319		      unsigned long *page_size)
1320{
1321	int level;
1322	u64 *pte;
1323
1324	if (address > PM_LEVEL_SIZE(domain->mode))
1325		return NULL;
1326
1327	level	   =  domain->mode - 1;
1328	pte	   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1329	*page_size =  PTE_LEVEL_PAGE_SIZE(level);
1330
1331	while (level > 0) {
1332
1333		/* Not Present */
1334		if (!IOMMU_PTE_PRESENT(*pte))
1335			return NULL;
1336
1337		/* Large PTE */
1338		if (PM_PTE_LEVEL(*pte) == 7 ||
1339		    PM_PTE_LEVEL(*pte) == 0)
1340			break;
1341
1342		/* No level skipping support yet */
1343		if (PM_PTE_LEVEL(*pte) != level)
1344			return NULL;
1345
1346		level -= 1;
1347
1348		/* Walk to the next level */
1349		pte	   = IOMMU_PTE_PAGE(*pte);
1350		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
1351		*page_size = PTE_LEVEL_PAGE_SIZE(level);
1352	}
1353
1354	if (PM_PTE_LEVEL(*pte) == 0x07) {
1355		unsigned long pte_mask;
1356
1357		/*
1358		 * If we have a series of large PTEs, make
1359		 * sure to return a pointer to the first one.
1360		 */
1361		*page_size = pte_mask = PTE_PAGE_SIZE(*pte);
1362		pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
1363		pte        = (u64 *)(((unsigned long)pte) & pte_mask);
1364	}
1365
1366	return pte;
1367}
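/*
 * Worked example (illustrative): a 32KB mapping is encoded as 8
 * identical level-7 PTEs.  PTE_PAGE_SIZE(*pte) yields 0x8000,
 * PAGE_SIZE_PTE_COUNT(0x8000) is 8, so pte_mask becomes ~63ULL and
 * the returned pointer is aligned down to the first of the 8
 * consecutive entries.
 */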
1368
1369/*
 1370 * Generic mapping function. It maps a physical address into a DMA
1371 * address space. It allocates the page table pages if necessary.
1372 * In the future it can be extended to a generic mapping function
1373 * supporting all features of AMD IOMMU page tables like level skipping
1374 * and full 64 bit address spaces.
1375 */
1376static int iommu_map_page(struct protection_domain *dom,
1377			  unsigned long bus_addr,
1378			  unsigned long phys_addr,
1379			  int prot,
1380			  unsigned long page_size)
1381{
1382	u64 __pte, *pte;
1383	int i, count;
1384
1385	BUG_ON(!IS_ALIGNED(bus_addr, page_size));
1386	BUG_ON(!IS_ALIGNED(phys_addr, page_size));
1387
1388	if (!(prot & IOMMU_PROT_MASK))
1389		return -EINVAL;
1390
1391	count = PAGE_SIZE_PTE_COUNT(page_size);
1392	pte   = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
1393
1394	if (!pte)
1395		return -ENOMEM;
1396
1397	for (i = 0; i < count; ++i)
1398		if (IOMMU_PTE_PRESENT(pte[i]))
1399			return -EBUSY;
1400
1401	if (count > 1) {
1402		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
1403		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
1404	} else
1405		__pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
1406
1407	if (prot & IOMMU_PROT_IR)
1408		__pte |= IOMMU_PTE_IR;
1409	if (prot & IOMMU_PROT_IW)
1410		__pte |= IOMMU_PTE_IW;
1411
1412	for (i = 0; i < count; ++i)
1413		pte[i] = __pte;
1414
1415	update_domain(dom);
1416
1417	return 0;
1418}
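/*
 * Usage sketch (illustrative; 0x200000UL spells out 2MB instead of a
 * header constant): mapping one large page read/write into a domain
 * boils down to
 *
 *	iommu_map_page(&dom->domain, iova, paddr,
 *		       IOMMU_PROT_IR | IOMMU_PROT_IW, 0x200000UL);
 *
 * where both iova and paddr must be 2MB-aligned or the BUG_ON()s
 * above fire.
 */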
1419
1420static unsigned long iommu_unmap_page(struct protection_domain *dom,
1421				      unsigned long bus_addr,
1422				      unsigned long page_size)
1423{
1424	unsigned long long unmapped;
1425	unsigned long unmap_size;
1426	u64 *pte;
1427
1428	BUG_ON(!is_power_of_2(page_size));
1429
1430	unmapped = 0;
1431
1432	while (unmapped < page_size) {
1433
1434		pte = fetch_pte(dom, bus_addr, &unmap_size);
1435
1436		if (pte) {
1437			int i, count;
1438
1439			count = PAGE_SIZE_PTE_COUNT(unmap_size);
1440			for (i = 0; i < count; i++)
1441				pte[i] = 0ULL;
1442		}
1443
1444		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
1445		unmapped += unmap_size;
1446	}
1447
1448	BUG_ON(unmapped && !is_power_of_2(unmapped));
1449
1450	return unmapped;
1451}
1452
1453/****************************************************************************
1454 *
1455 * The next functions belong to the address allocator for the dma_ops
1456 * interface functions. They work like the allocators in the other IOMMU
 1457 * drivers. It's basically a bitmap which marks the allocated pages in
1458 * the aperture. Maybe it could be enhanced in the future to a more
1459 * efficient allocator.
1460 *
1461 ****************************************************************************/
1462
1463/*
1464 * The address allocator core functions.
1465 *
1466 * called with domain->lock held
1467 */
1468
1469/*
1470 * Used to reserve address ranges in the aperture (e.g. for exclusion
1471 * ranges.
1472 */
1473static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
1474				      unsigned long start_page,
1475				      unsigned int pages)
1476{
1477	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
1478
1479	if (start_page + pages > last_page)
1480		pages = last_page - start_page;
1481
1482	for (i = start_page; i < start_page + pages; ++i) {
1483		int index = i / APERTURE_RANGE_PAGES;
1484		int page  = i % APERTURE_RANGE_PAGES;
1485		__set_bit(page, dom->aperture[index]->bitmap);
1486	}
1487}
1488
1489/*
1490 * This function is used to add a new aperture range to an existing
1491 * aperture in case of dma_ops domain allocation or address allocation
1492 * failure.
1493 */
1494static int alloc_new_range(struct dma_ops_domain *dma_dom,
1495			   bool populate, gfp_t gfp)
1496{
1497	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
1498	unsigned long i, old_size, pte_pgsize;
1499	struct aperture_range *range;
1500	struct amd_iommu *iommu;
1501	unsigned long flags;
1502
1503#ifdef CONFIG_IOMMU_STRESS
1504	populate = false;
1505#endif
1506
1507	if (index >= APERTURE_MAX_RANGES)
1508		return -ENOMEM;
1509
1510	range = kzalloc(sizeof(struct aperture_range), gfp);
1511	if (!range)
1512		return -ENOMEM;
1513
1514	range->bitmap = (void *)get_zeroed_page(gfp);
1515	if (!range->bitmap)
1516		goto out_free;
1517
1518	range->offset = dma_dom->aperture_size;
1519
1520	spin_lock_init(&range->bitmap_lock);
1521
1522	if (populate) {
1523		unsigned long address = dma_dom->aperture_size;
1524		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
1525		u64 *pte, *pte_page;
1526
1527		for (i = 0; i < num_ptes; ++i) {
1528			pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
1529					&pte_page, gfp);
1530			if (!pte)
1531				goto out_free;
1532
1533			range->pte_pages[i] = pte_page;
1534
1535			address += APERTURE_RANGE_SIZE / 64;
1536		}
1537	}
1538
1539	spin_lock_irqsave(&dma_dom->domain.lock, flags);
1540
1541	/* First take the bitmap_lock and then publish the range */
1542	spin_lock(&range->bitmap_lock);
1543
1544	old_size                 = dma_dom->aperture_size;
1545	dma_dom->aperture[index] = range;
1546	dma_dom->aperture_size  += APERTURE_RANGE_SIZE;
1547
1548	/* Reserve address range used for MSI messages */
1549	if (old_size < MSI_ADDR_BASE_LO &&
1550	    dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
1551		unsigned long spage;
1552		int pages;
1553
1554		pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
1555		spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
1556
1557		dma_ops_reserve_addresses(dma_dom, spage, pages);
1558	}
1559
1560	/* Initialize the exclusion range if necessary */
1561	for_each_iommu(iommu) {
1562		if (iommu->exclusion_start &&
1563		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
1564		    && iommu->exclusion_start < dma_dom->aperture_size) {
1565			unsigned long startpage;
1566			int pages = iommu_num_pages(iommu->exclusion_start,
1567						    iommu->exclusion_length,
1568						    PAGE_SIZE);
1569			startpage = iommu->exclusion_start >> PAGE_SHIFT;
1570			dma_ops_reserve_addresses(dma_dom, startpage, pages);
1571		}
1572	}
1573
1574	/*
1575	 * Check for areas already mapped as present in the new aperture
1576	 * range and mark those pages as reserved in the allocator. Such
1577	 * mappings may already exist as a result of requested unity
1578	 * mappings for devices.
1579	 */
1580	for (i = dma_dom->aperture[index]->offset;
1581	     i < dma_dom->aperture_size;
1582	     i += pte_pgsize) {
1583		u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
1584		if (!pte || !IOMMU_PTE_PRESENT(*pte))
1585			continue;
1586
1587		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
1588					  pte_pgsize >> 12);
1589	}
1590
1591	update_domain(&dma_dom->domain);
1592
1593	spin_unlock(&range->bitmap_lock);
1594
1595	spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
1596
1597	return 0;
1598
1599out_free:
1600	update_domain(&dma_dom->domain);
1601
1602	free_page((unsigned long)range->bitmap);
1603
1604	kfree(range);
1605
1606	return -ENOMEM;
1607}
1608
1609static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom,
1610					 struct aperture_range *range,
1611					 unsigned long pages,
1612					 unsigned long dma_mask,
1613					 unsigned long boundary_size,
1614					 unsigned long align_mask,
1615					 bool trylock)
1616{
1617	unsigned long offset, limit, flags;
1618	dma_addr_t address;
1619	bool flush = false;
1620
1621	offset = range->offset >> PAGE_SHIFT;
1622	limit  = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
1623					dma_mask >> PAGE_SHIFT);
1624
1625	if (trylock) {
1626		if (!spin_trylock_irqsave(&range->bitmap_lock, flags))
1627			return -1;
1628	} else {
1629		spin_lock_irqsave(&range->bitmap_lock, flags);
1630	}
1631
1632	address = iommu_area_alloc(range->bitmap, limit, range->next_bit,
1633				   pages, offset, boundary_size, align_mask);
1634	if (address == -1) {
1635		/* Nothing found, retry one time */
1636		address = iommu_area_alloc(range->bitmap, limit,
1637					   0, pages, offset, boundary_size,
1638					   align_mask);
1639		flush = true;
1640	}
1641
1642	if (address != -1)
1643		range->next_bit = address + pages;
1644
1645	spin_unlock_irqrestore(&range->bitmap_lock, flags);
1646
1647	if (flush) {
1648		domain_flush_tlb(&dom->domain);
1649		domain_flush_complete(&dom->domain);
1650	}
1651
1652	return address;
1653}
1654
1655static unsigned long dma_ops_area_alloc(struct device *dev,
1656					struct dma_ops_domain *dom,
1657					unsigned int pages,
1658					unsigned long align_mask,
1659					u64 dma_mask)
1660{
1661	unsigned long boundary_size, mask;
1662	unsigned long address = -1;
1663	bool first = true;
1664	u32 start, i;
1665
1666	preempt_disable();
1667
1668	mask = dma_get_seg_boundary(dev);
1669
1670again:
1671	start = this_cpu_read(*dom->next_index);
1672
1673	/* Sanity check - is it really necessary? */
1674	if (unlikely(start > APERTURE_MAX_RANGES)) {
1675		start = 0;
1676		this_cpu_write(*dom->next_index, 0);
1677	}
1678
1679	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
1680				   1UL << (BITS_PER_LONG - PAGE_SHIFT);
1681
1682	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1683		struct aperture_range *range;
1684		int index;
1685
1686		index = (start + i) % APERTURE_MAX_RANGES;
1687
1688		range = dom->aperture[index];
1689
1690		if (!range || range->offset >= dma_mask)
1691			continue;
1692
1693		address = dma_ops_aperture_alloc(dom, range, pages,
1694						 dma_mask, boundary_size,
1695						 align_mask, first);
1696		if (address != -1) {
1697			address = range->offset + (address << PAGE_SHIFT);
1698			this_cpu_write(*dom->next_index, index);
1699			break;
1700		}
1701	}
1702
1703	if (address == -1 && first) {
1704		first = false;
1705		goto again;
1706	}
1707
1708	preempt_enable();
1709
1710	return address;
1711}
1712
1713static unsigned long dma_ops_alloc_addresses(struct device *dev,
1714					     struct dma_ops_domain *dom,
1715					     unsigned int pages,
1716					     unsigned long align_mask,
1717					     u64 dma_mask)
1718{
1719	unsigned long address = -1;
1720
1721	while (address == -1) {
1722		address = dma_ops_area_alloc(dev, dom, pages,
1723					     align_mask, dma_mask);
1724
1725		if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC))
1726			break;
1727	}
1728
1729	if (unlikely(address == -1))
1730		address = DMA_ERROR_CODE;
1731
1732	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
1733
1734	return address;
1735}
1736
1737/*
1738 * The address free function.
1739 *
1740 * called with domain->lock held
1741 */
1742static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1743				   unsigned long address,
1744				   unsigned int pages)
1745{
1746	unsigned i = address >> APERTURE_RANGE_SHIFT;
1747	struct aperture_range *range = dom->aperture[i];
1748	unsigned long flags;
1749
1750	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
1751
1752#ifdef CONFIG_IOMMU_STRESS
1753	if (i < 4)
1754		return;
1755#endif
1756
1757	if (amd_iommu_unmap_flush) {
1758		domain_flush_tlb(&dom->domain);
1759		domain_flush_complete(&dom->domain);
1760	}
1761
1762	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
1763
1764	spin_lock_irqsave(&range->bitmap_lock, flags);
1765	if (address + pages > range->next_bit)
1766		range->next_bit = address + pages;
1767	bitmap_clear(range->bitmap, address, pages);
1768	spin_unlock_irqrestore(&range->bitmap_lock, flags);
1769
1770}
1771
1772/****************************************************************************
1773 *
1774 * The next functions belong to the domain allocation. A domain is
1775 * allocated for every IOMMU as the default domain. If device isolation
 1776 * is enabled, every device gets its own domain. The most important thing
1777 * about domains is the page table mapping the DMA address space they
1778 * contain.
1779 *
1780 ****************************************************************************/
1781
1782/*
1783 * This function adds a protection domain to the global protection domain list
1784 */
1785static void add_domain_to_list(struct protection_domain *domain)
1786{
1787	unsigned long flags;
1788
1789	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1790	list_add(&domain->list, &amd_iommu_pd_list);
1791	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1792}
1793
1794/*
 1795 * This function removes a protection domain from the global
1796 * protection domain list
1797 */
1798static void del_domain_from_list(struct protection_domain *domain)
1799{
1800	unsigned long flags;
1801
1802	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1803	list_del(&domain->list);
1804	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1805}
1806
1807static u16 domain_id_alloc(void)
1808{
1809	unsigned long flags;
1810	int id;
1811
1812	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1813	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1814	BUG_ON(id == 0);
1815	if (id > 0 && id < MAX_DOMAIN_ID)
1816		__set_bit(id, amd_iommu_pd_alloc_bitmap);
1817	else
1818		id = 0;
1819	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1820
1821	return id;
1822}
1823
1824static void domain_id_free(int id)
1825{
1826	unsigned long flags;
1827
1828	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1829	if (id > 0 && id < MAX_DOMAIN_ID)
1830		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
1831	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1832}
1833
1834#define DEFINE_FREE_PT_FN(LVL, FN)				\
1835static void free_pt_##LVL (unsigned long __pt)			\
1836{								\
1837	unsigned long p;					\
1838	u64 *pt;						\
1839	int i;							\
1840								\
1841	pt = (u64 *)__pt;					\
1842								\
1843	for (i = 0; i < 512; ++i) {				\
1844		/* PTE present? */				\
1845		if (!IOMMU_PTE_PRESENT(pt[i]))			\
1846			continue;				\
1847								\
1848		/* Large PTE? */				\
1849		if (PM_PTE_LEVEL(pt[i]) == 0 ||			\
1850		    PM_PTE_LEVEL(pt[i]) == 7)			\
1851			continue;				\
1852								\
1853		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
1854		FN(p);						\
1855	}							\
1856	free_page((unsigned long)pt);				\
1857}
1858
1859DEFINE_FREE_PT_FN(l2, free_page)
1860DEFINE_FREE_PT_FN(l3, free_pt_l2)
1861DEFINE_FREE_PT_FN(l4, free_pt_l3)
1862DEFINE_FREE_PT_FN(l5, free_pt_l4)
1863DEFINE_FREE_PT_FN(l6, free_pt_l5)
1864
1865static void free_pagetable(struct protection_domain *domain)
1866{
1867	unsigned long root = (unsigned long)domain->pt_root;
1868
1869	switch (domain->mode) {
1870	case PAGE_MODE_NONE:
1871		break;
1872	case PAGE_MODE_1_LEVEL:
1873		free_page(root);
1874		break;
1875	case PAGE_MODE_2_LEVEL:
1876		free_pt_l2(root);
1877		break;
1878	case PAGE_MODE_3_LEVEL:
1879		free_pt_l3(root);
1880		break;
1881	case PAGE_MODE_4_LEVEL:
1882		free_pt_l4(root);
1883		break;
1884	case PAGE_MODE_5_LEVEL:
1885		free_pt_l5(root);
1886		break;
1887	case PAGE_MODE_6_LEVEL:
1888		free_pt_l6(root);
1889		break;
1890	default:
1891		BUG();
1892	}
1893}
1894
1895static void free_gcr3_tbl_level1(u64 *tbl)
1896{
1897	u64 *ptr;
1898	int i;
1899
1900	for (i = 0; i < 512; ++i) {
1901		if (!(tbl[i] & GCR3_VALID))
1902			continue;
1903
1904		ptr = __va(tbl[i] & PAGE_MASK);
1905
1906		free_page((unsigned long)ptr);
1907	}
1908}
1909
1910static void free_gcr3_tbl_level2(u64 *tbl)
1911{
1912	u64 *ptr;
1913	int i;
1914
1915	for (i = 0; i < 512; ++i) {
1916		if (!(tbl[i] & GCR3_VALID))
1917			continue;
1918
1919		ptr = __va(tbl[i] & PAGE_MASK);
1920
1921		free_gcr3_tbl_level1(ptr);
1922	}
1923}
1924
1925static void free_gcr3_table(struct protection_domain *domain)
1926{
1927	if (domain->glx == 2)
1928		free_gcr3_tbl_level2(domain->gcr3_tbl);
1929	else if (domain->glx == 1)
1930		free_gcr3_tbl_level1(domain->gcr3_tbl);
1931	else
1932		BUG_ON(domain->glx != 0);
1933
1934	free_page((unsigned long)domain->gcr3_tbl);
1935}
1936
1937/*
1938 * Free a domain, only used if something went wrong in the
1939 * allocation path and we need to free an already allocated page table
1940 */
1941static void dma_ops_domain_free(struct dma_ops_domain *dom)
1942{
1943	int i;
1944
1945	if (!dom)
1946		return;
1947
1948	free_percpu(dom->next_index);
1949
1950	del_domain_from_list(&dom->domain);
1951
1952	free_pagetable(&dom->domain);
1953
1954	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1955		if (!dom->aperture[i])
1956			continue;
1957		free_page((unsigned long)dom->aperture[i]->bitmap);
1958		kfree(dom->aperture[i]);
1959	}
1960
1961	kfree(dom);
1962}
1963
1964static int dma_ops_domain_alloc_apertures(struct dma_ops_domain *dma_dom,
1965					  int max_apertures)
1966{
1967	int ret, i, apertures;
1968
1969	apertures = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
1970	ret       = 0;
1971
1972	for (i = apertures; i < max_apertures; ++i) {
1973		ret = alloc_new_range(dma_dom, false, GFP_KERNEL);
1974		if (ret)
1975			break;
1976	}
1977
1978	return ret;
1979}
1980
1981/*
1982 * Allocates a new protection domain usable for the dma_ops functions.
1983 * It also initializes the page table and the address allocator data
1984 * structures required for the dma_ops interface
1985 */
1986static struct dma_ops_domain *dma_ops_domain_alloc(void)
1987{
1988	struct dma_ops_domain *dma_dom;
1989	int cpu;
1990
1991	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
1992	if (!dma_dom)
1993		return NULL;
1994
1995	if (protection_domain_init(&dma_dom->domain))
1996		goto free_dma_dom;
1997
1998	dma_dom->next_index = alloc_percpu(u32);
1999	if (!dma_dom->next_index)
2000		goto free_dma_dom;
2001
2002	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
2003	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2004	dma_dom->domain.flags = PD_DMA_OPS_MASK;
2005	dma_dom->domain.priv = dma_dom;
2006	if (!dma_dom->domain.pt_root)
2007		goto free_dma_dom;
2008
2009	add_domain_to_list(&dma_dom->domain);
2010
2011	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
2012		goto free_dma_dom;
2013
2014	/*
2015	 * Mark the first page as allocated so we never return 0 as
2016	 * a valid dma-address and can thus use 0 as the error value.
2017	 */
2018	dma_dom->aperture[0]->bitmap[0] = 1;
2019
2020	for_each_possible_cpu(cpu)
2021		*per_cpu_ptr(dma_dom->next_index, cpu) = 0;
2022
2023	return dma_dom;
2024
2025free_dma_dom:
2026	dma_ops_domain_free(dma_dom);
2027
2028	return NULL;
2029}
2030
2031/*
2032 * little helper function to check whether a given protection domain is a
2033 * dma_ops domain
2034 */
2035static bool dma_ops_domain(struct protection_domain *domain)
2036{
2037	return domain->flags & PD_DMA_OPS_MASK;
2038}
2039
2040static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
2041{
2042	u64 pte_root = 0;
2043	u64 flags = 0;
2044
2045	if (domain->mode != PAGE_MODE_NONE)
2046		pte_root = virt_to_phys(domain->pt_root);
2047
2048	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
2049		    << DEV_ENTRY_MODE_SHIFT;
2050	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
2051
2052	flags = amd_iommu_dev_table[devid].data[1];
2053
2054	if (ats)
2055		flags |= DTE_FLAG_IOTLB;
2056
2057	if (domain->flags & PD_IOMMUV2_MASK) {
2058		u64 gcr3 = __pa(domain->gcr3_tbl);
2059		u64 glx  = domain->glx;
2060		u64 tmp;
2061
2062		pte_root |= DTE_FLAG_GV;
2063		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
2064
2065		/* First mask out possible old values for GCR3 table */
2066		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
2067		flags    &= ~tmp;
2068
2069		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
2070		flags    &= ~tmp;
2071
2072		/* Encode GCR3 table into DTE */
2073		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
2074		pte_root |= tmp;
2075
2076		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
2077		flags    |= tmp;
2078
2079		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
2080		flags    |= tmp;
2081	}
2082
2083	flags &= ~(0xffffUL);
2084	flags |= domain->id;
2085
2086	amd_iommu_dev_table[devid].data[1]  = flags;
2087	amd_iommu_dev_table[devid].data[0]  = pte_root;
2088}
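/*
 * Illustration only: note how set_dte_entry() splits the GCR3 table
 * pointer across the DTE. DTE_GCR3_VAL_A() lands in data[0] together
 * with the page-table root, while DTE_GCR3_VAL_B() and
 * DTE_GCR3_VAL_C() are merged into data[1] after the old B/C bits
 * have been masked out, so re-attaching a device never leaves stale
 * GCR3 address bits behind.
 */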
2089
2090static void clear_dte_entry(u16 devid)
2091{
2092	/* remove entry from the device table seen by the hardware */
2093	amd_iommu_dev_table[devid].data[0]  = IOMMU_PTE_P | IOMMU_PTE_TV;
2094	amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
2095
2096	amd_iommu_apply_erratum_63(devid);
2097}
2098
2099static void do_attach(struct iommu_dev_data *dev_data,
2100		      struct protection_domain *domain)
2101{
2102	struct amd_iommu *iommu;
2103	u16 alias;
2104	bool ats;
2105
2106	iommu = amd_iommu_rlookup_table[dev_data->devid];
2107	alias = dev_data->alias;
2108	ats   = dev_data->ats.enabled;
2109
2110	/* Update data structures */
2111	dev_data->domain = domain;
2112	list_add(&dev_data->list, &domain->dev_list);
2113
2114	/* Do reference counting */
2115	domain->dev_iommu[iommu->index] += 1;
2116	domain->dev_cnt                 += 1;
2117
2118	/* Update device table */
2119	set_dte_entry(dev_data->devid, domain, ats);
2120	if (alias != dev_data->devid)
2121		set_dte_entry(alias, domain, ats);
2122
2123	device_flush_dte(dev_data);
2124}
2125
2126static void do_detach(struct iommu_dev_data *dev_data)
2127{
2128	struct amd_iommu *iommu;
2129	u16 alias;
2130
2131	/*
2132	 * First check if the device is still attached. It might already
2133	 * be detached from its domain because the generic
2134	 * iommu_detach_group code detached it and we try again here in
2135	 * our alias handling.
2136	 */
2137	if (!dev_data->domain)
2138		return;
2139
2140	iommu = amd_iommu_rlookup_table[dev_data->devid];
2141	alias = dev_data->alias;
2142
2143	/* decrease reference counters */
2144	dev_data->domain->dev_iommu[iommu->index] -= 1;
2145	dev_data->domain->dev_cnt                 -= 1;
2146
2147	/* Update data structures */
2148	dev_data->domain = NULL;
2149	list_del(&dev_data->list);
2150	clear_dte_entry(dev_data->devid);
2151	if (alias != dev_data->devid)
2152		clear_dte_entry(alias);
2153
2154	/* Flush the DTE entry */
2155	device_flush_dte(dev_data);
2156}
2157
2158/*
2159 * If a device is not yet associated with a domain, this function
2160 * attaches it to the domain and makes it visible to the hardware
2161 */
2162static int __attach_device(struct iommu_dev_data *dev_data,
2163			   struct protection_domain *domain)
2164{
2165	int ret;
2166
2167	/*
2168	 * Must be called with IRQs disabled. Warn here to detect early
2169	 * when it's not.
2170	 */
2171	WARN_ON(!irqs_disabled());
2172
2173	/* lock domain */
2174	spin_lock(&domain->lock);
2175
2176	ret = -EBUSY;
2177	if (dev_data->domain != NULL)
2178		goto out_unlock;
2179
2180	/* Attach alias group root */
2181	do_attach(dev_data, domain);
2182
2183	ret = 0;
2184
2185out_unlock:
2186
2187	/* ready */
2188	spin_unlock(&domain->lock);
2189
2190	return ret;
2191}
2192
2193
2194static void pdev_iommuv2_disable(struct pci_dev *pdev)
2195{
2196	pci_disable_ats(pdev);
2197	pci_disable_pri(pdev);
2198	pci_disable_pasid(pdev);
2199}
2200
2201/* FIXME: Change generic reset-function to do the same */
2202static int pri_reset_while_enabled(struct pci_dev *pdev)
2203{
2204	u16 control;
2205	int pos;
2206
2207	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2208	if (!pos)
2209		return -EINVAL;
2210
2211	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
2212	control |= PCI_PRI_CTRL_RESET;
2213	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
2214
2215	return 0;
2216}
2217
2218static int pdev_iommuv2_enable(struct pci_dev *pdev)
2219{
2220	bool reset_enable;
2221	int reqs, ret;
2222
2223	/* FIXME: Hardcode number of outstanding requests for now */
2224	reqs = 32;
2225	if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
2226		reqs = 1;
2227	reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
2228
2229	/* Only allow access to user-accessible pages */
2230	ret = pci_enable_pasid(pdev, 0);
2231	if (ret)
2232		goto out_err;
2233
2234	/* First reset the PRI state of the device */
2235	ret = pci_reset_pri(pdev);
2236	if (ret)
2237		goto out_err;
2238
2239	/* Enable PRI */
2240	ret = pci_enable_pri(pdev, reqs);
2241	if (ret)
2242		goto out_err;
2243
2244	if (reset_enable) {
2245		ret = pri_reset_while_enabled(pdev);
2246		if (ret)
2247			goto out_err;
2248	}
2249
2250	ret = pci_enable_ats(pdev, PAGE_SHIFT);
2251	if (ret)
2252		goto out_err;
2253
2254	return 0;
2255
2256out_err:
2257	pci_disable_pri(pdev);
2258	pci_disable_pasid(pdev);
2259
2260	return ret;
2261}
2262
2263/* FIXME: Move this to PCI code */
2264#define PCI_PRI_TLP_OFF		(1 << 15)
2265
2266static bool pci_pri_tlp_required(struct pci_dev *pdev)
2267{
2268	u16 status;
2269	int pos;
2270
2271	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2272	if (!pos)
2273		return false;
2274
2275	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
2276
2277	return (status & PCI_PRI_TLP_OFF) ? true : false;
2278}
2279
2280/*
2281 * If a device is not yet associated with a domain, this function
2282 * attaches it to the domain and makes it visible to the hardware
2283 */
2284static int attach_device(struct device *dev,
2285			 struct protection_domain *domain)
2286{
2287	struct pci_dev *pdev = to_pci_dev(dev);
2288	struct iommu_dev_data *dev_data;
2289	unsigned long flags;
2290	int ret;
2291
2292	dev_data = get_dev_data(dev);
2293
2294	if (domain->flags & PD_IOMMUV2_MASK) {
2295		if (!dev_data->passthrough)
2296			return -EINVAL;
2297
2298		if (dev_data->iommu_v2) {
2299			if (pdev_iommuv2_enable(pdev) != 0)
2300				return -EINVAL;
2301
2302			dev_data->ats.enabled = true;
2303			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2304			dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
2305		}
2306	} else if (amd_iommu_iotlb_sup &&
2307		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2308		dev_data->ats.enabled = true;
2309		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2310	}
2311
2312	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2313	ret = __attach_device(dev_data, domain);
2314	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2315
2316	/*
2317	 * We might boot into a crash-kernel here. The crashed kernel
2318	 * may have left the IOMMU caches dirty, so we have to flush
2319	 * here to evict all stale entries.
2320	 */
2321	domain_flush_tlb_pde(domain);
2322
2323	return ret;
2324}
2325
2326/*
2327 * Removes a device from a protection domain (unlocked)
2328 */
2329static void __detach_device(struct iommu_dev_data *dev_data)
2330{
2331	struct protection_domain *domain;
2332
2333	/*
2334	 * Must be called with IRQs disabled. Warn here to detect early
2335	 * when it's not.
2336	 */
2337	WARN_ON(!irqs_disabled());
2338
2339	if (WARN_ON(!dev_data->domain))
2340		return;
2341
2342	domain = dev_data->domain;
2343
2344	spin_lock(&domain->lock);
2345
2346	do_detach(dev_data);
2347
2348	spin_unlock(&domain->lock);
2349}
2350
2351/*
2352 * Removes a device from a protection domain (with devtable_lock held)
2353 */
2354static void detach_device(struct device *dev)
2355{
2356	struct protection_domain *domain;
2357	struct iommu_dev_data *dev_data;
2358	unsigned long flags;
2359
2360	dev_data = get_dev_data(dev);
2361	domain   = dev_data->domain;
2362
2363	/* lock device table */
2364	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2365	__detach_device(dev_data);
2366	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2367
2368	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
2369		pdev_iommuv2_disable(to_pci_dev(dev));
2370	else if (dev_data->ats.enabled)
2371		pci_disable_ats(to_pci_dev(dev));
2372
2373	dev_data->ats.enabled = false;
2374}
2375
2376static int amd_iommu_add_device(struct device *dev)
2377{
2378	struct iommu_dev_data *dev_data;
2379	struct iommu_domain *domain;
2380	struct amd_iommu *iommu;
2381	u16 devid;
2382	int ret;
2383
2384	if (!check_device(dev) || get_dev_data(dev))
2385		return 0;
2386
2387	devid = get_device_id(dev);
2388	iommu = amd_iommu_rlookup_table[devid];
2389
2390	ret = iommu_init_device(dev);
2391	if (ret) {
2392		if (ret != -ENOTSUPP)
2393			pr_err("Failed to initialize device %s - trying to proceed anyway\n",
2394				dev_name(dev));
2395
2396		iommu_ignore_device(dev);
2397		dev->archdata.dma_ops = &nommu_dma_ops;
2398		goto out;
2399	}
2400	init_iommu_group(dev);
2401
2402	dev_data = get_dev_data(dev);
2403
2404	BUG_ON(!dev_data);
2405
2406	if (iommu_pass_through || dev_data->iommu_v2)
2407		iommu_request_dm_for_dev(dev);
2408
2409	/* Domains are initialized for this device - have a look what we ended up with */
2410	domain = iommu_get_domain_for_dev(dev);
2411	if (domain->type == IOMMU_DOMAIN_IDENTITY)
2412		dev_data->passthrough = true;
2413	else
2414		dev->archdata.dma_ops = &amd_iommu_dma_ops;
2415
2416out:
2417	iommu_completion_wait(iommu);
2418
2419	return 0;
2420}
2421
2422static void amd_iommu_remove_device(struct device *dev)
2423{
2424	struct amd_iommu *iommu;
2425	u16 devid;
2426
2427	if (!check_device(dev))
2428		return;
2429
2430	devid = get_device_id(dev);
2431	iommu = amd_iommu_rlookup_table[devid];
2432
2433	iommu_uninit_device(dev);
2434	iommu_completion_wait(iommu);
2435}
2436
2437/*****************************************************************************
2438 *
2439 * The next functions belong to the dma_ops mapping/unmapping code.
2440 *
2441 *****************************************************************************/
2442
2443/*
2444 * In the dma_ops path we only have the struct device. This function
2445 * finds the protection domain for a given device and checks
2446 * that it is a dma_ops domain.
2447 * If the device is not handled by the IOMMU or its domain is not
2448 * a dma_ops domain, a NULL or error pointer is returned.
2449 */
2450static struct protection_domain *get_domain(struct device *dev)
2451{
2452	struct protection_domain *domain;
2453	struct iommu_domain *io_domain;
2454
2455	if (!check_device(dev))
2456		return ERR_PTR(-EINVAL);
2457
2458	io_domain = iommu_get_domain_for_dev(dev);
2459	if (!io_domain)
2460		return NULL;
2461
2462	domain = to_pdomain(io_domain);
2463	if (!dma_ops_domain(domain))
2464		return ERR_PTR(-EBUSY);
2465
2466	return domain;
2467}
2468
2469static void update_device_table(struct protection_domain *domain)
2470{
2471	struct iommu_dev_data *dev_data;
2472
2473	list_for_each_entry(dev_data, &domain->dev_list, list)
2474		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
2475}
2476
2477static void update_domain(struct protection_domain *domain)
2478{
2479	if (!domain->updated)
2480		return;
2481
2482	update_device_table(domain);
2483
2484	domain_flush_devices(domain);
2485	domain_flush_tlb_pde(domain);
2486
2487	domain->updated = false;
2488}
2489
2490/*
2491 * This function fetches the PTE for a given address in the aperture
2492 */
2493static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
2494			    unsigned long address)
2495{
2496	struct aperture_range *aperture;
2497	u64 *pte, *pte_page;
2498
2499	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2500	if (!aperture)
2501		return NULL;
2502
2503	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2504	if (!pte) {
2505		pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
2506				GFP_ATOMIC);
2507		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
2508	} else
2509		pte += PM_LEVEL_INDEX(0, address);
2510
2511	update_domain(&dom->domain);
2512
2513	return pte;
2514}
2515
2516/*
2517 * This is the generic map function. It maps one 4kb page at paddr to
2518 * the given address in the DMA address space for the domain.
2519 */
2520static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
2521				     unsigned long address,
2522				     phys_addr_t paddr,
2523				     int direction)
2524{
2525	u64 *pte, __pte;
2526
2527	WARN_ON(address > dom->aperture_size);
2528
2529	paddr &= PAGE_MASK;
2530
2531	pte  = dma_ops_get_pte(dom, address);
2532	if (!pte)
2533		return DMA_ERROR_CODE;
2534
2535	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
2536
2537	if (direction == DMA_TO_DEVICE)
2538		__pte |= IOMMU_PTE_IR;
2539	else if (direction == DMA_FROM_DEVICE)
2540		__pte |= IOMMU_PTE_IW;
2541	else if (direction == DMA_BIDIRECTIONAL)
2542		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
2543
2544	WARN_ON_ONCE(*pte);
2545
2546	*pte = __pte;
2547
2548	return (dma_addr_t)address;
2549}
2550
2551/*
2552 * The generic unmapping function for one page in the DMA address space.
2553 */
2554static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
2555				 unsigned long address)
2556{
2557	struct aperture_range *aperture;
2558	u64 *pte;
2559
2560	if (address >= dom->aperture_size)
2561		return;
2562
2563	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2564	if (!aperture)
2565		return;
2566
2567	pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2568	if (!pte)
2569		return;
2570
2571	pte += PM_LEVEL_INDEX(0, address);
2572
2573	WARN_ON_ONCE(!*pte);
2574
2575	*pte = 0ULL;
2576}
2577
2578/*
2579 * This function contains common code for mapping of a physically
2580 * contiguous memory region into DMA address space. It is used by all
2581 * mapping functions provided with this IOMMU driver.
2582 * Must be called with the domain lock held.
2583 */
2584static dma_addr_t __map_single(struct device *dev,
2585			       struct dma_ops_domain *dma_dom,
2586			       phys_addr_t paddr,
2587			       size_t size,
2588			       int dir,
2589			       bool align,
2590			       u64 dma_mask)
2591{
2592	dma_addr_t offset = paddr & ~PAGE_MASK;
2593	dma_addr_t address, start, ret;
2594	unsigned int pages;
2595	unsigned long align_mask = 0;
2596	int i;
2597
2598	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
2599	paddr &= PAGE_MASK;
2600
2601	INC_STATS_COUNTER(total_map_requests);
2602
2603	if (pages > 1)
2604		INC_STATS_COUNTER(cross_page);
2605
2606	if (align)
2607		align_mask = (1UL << get_order(size)) - 1;
2608
2609	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
2610					  dma_mask);
2611
2612	if (address == DMA_ERROR_CODE)
2613		goto out;
2614
2615	start = address;
2616	for (i = 0; i < pages; ++i) {
2617		ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
2618		if (ret == DMA_ERROR_CODE)
2619			goto out_unmap;
2620
2621		paddr += PAGE_SIZE;
2622		start += PAGE_SIZE;
2623	}
2624	address += offset;
2625
2626	ADD_STATS_COUNTER(alloced_io_mem, size);
2627
2628	if (unlikely(amd_iommu_np_cache)) {
2629		domain_flush_pages(&dma_dom->domain, address, size);
2630		domain_flush_complete(&dma_dom->domain);
2631	}
2632
2633out:
2634	return address;
2635
2636out_unmap:
2637
2638	for (--i; i >= 0; --i) {
2639		start -= PAGE_SIZE;
2640		dma_ops_domain_unmap(dma_dom, start);
2641	}
2642
2643	dma_ops_free_addresses(dma_dom, address, pages);
2644
2645	return DMA_ERROR_CODE;
2646}
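/*
 * Worked example for __map_single() (illustration only, values made
 * up): mapping paddr = 0x12345678 with size = 0x1000 gives
 * offset = 0x678 and pages = 2, since the buffer crosses a page
 * boundary. Two 4K PTEs are written for the aligned addresses
 * 0x12345000 and 0x12346000, and the returned dma_addr is the
 * allocated IOVA plus 0x678 so the sub-page offset is preserved.
 */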
2647
2648/*
2649 * Does the reverse of the __map_single function. Must be called with
2650 * the domain lock held too
2651 */
2652static void __unmap_single(struct dma_ops_domain *dma_dom,
2653			   dma_addr_t dma_addr,
2654			   size_t size,
2655			   int dir)
2656{
2657	dma_addr_t flush_addr;
2658	dma_addr_t i, start;
2659	unsigned int pages;
2660
2661	if ((dma_addr == DMA_ERROR_CODE) ||
2662	    (dma_addr + size > dma_dom->aperture_size))
2663		return;
2664
2665	flush_addr = dma_addr;
2666	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
2667	dma_addr &= PAGE_MASK;
2668	start = dma_addr;
2669
2670	for (i = 0; i < pages; ++i) {
2671		dma_ops_domain_unmap(dma_dom, start);
2672		start += PAGE_SIZE;
2673	}
2674
2675	SUB_STATS_COUNTER(alloced_io_mem, size);
2676
2677	dma_ops_free_addresses(dma_dom, dma_addr, pages);
2678}
2679
2680/*
2681 * The exported map_single function for dma_ops.
2682 */
2683static dma_addr_t map_page(struct device *dev, struct page *page,
2684			   unsigned long offset, size_t size,
2685			   enum dma_data_direction dir,
2686			   struct dma_attrs *attrs)
2687{
2688	phys_addr_t paddr = page_to_phys(page) + offset;
2689	struct protection_domain *domain;
2690	u64 dma_mask;
2691
2692	INC_STATS_COUNTER(cnt_map_single);
2693
2694	domain = get_domain(dev);
2695	if (PTR_ERR(domain) == -EINVAL)
2696		return (dma_addr_t)paddr;
2697	else if (IS_ERR(domain))
2698		return DMA_ERROR_CODE;
2699
2700	dma_mask = *dev->dma_mask;
2701
2702	return __map_single(dev, domain->priv, paddr, size, dir, false,
2703			    dma_mask);
2704}
2705
2706/*
2707 * The exported unmap_single function for dma_ops.
2708 */
2709static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2710		       enum dma_data_direction dir, struct dma_attrs *attrs)
2711{
2712	struct protection_domain *domain;
2713
2714	INC_STATS_COUNTER(cnt_unmap_single);
2715
2716	domain = get_domain(dev);
2717	if (IS_ERR(domain))
2718		return;
2719
2720	__unmap_single(domain->priv, dma_addr, size, dir);
2721}
2722
2723/*
2724 * The exported map_sg function for dma_ops (handles scatter-gather
2725 * lists).
2726 */
2727static int map_sg(struct device *dev, struct scatterlist *sglist,
2728		  int nelems, enum dma_data_direction dir,
2729		  struct dma_attrs *attrs)
2730{
2731	struct protection_domain *domain;
2732	int i;
2733	struct scatterlist *s;
2734	phys_addr_t paddr;
2735	int mapped_elems = 0;
2736	u64 dma_mask;
2737
2738	INC_STATS_COUNTER(cnt_map_sg);
2739
2740	domain = get_domain(dev);
2741	if (IS_ERR(domain))
2742		return 0;
2743
2744	dma_mask = *dev->dma_mask;
2745
2746	for_each_sg(sglist, s, nelems, i) {
2747		paddr = sg_phys(s);
2748
2749		s->dma_address = __map_single(dev, domain->priv,
2750					      paddr, s->length, dir, false,
2751					      dma_mask);
2752
2753		if (s->dma_address) {
2754			s->dma_length = s->length;
2755			mapped_elems++;
2756		} else
2757			goto unmap;
2758	}
2759
2760	return mapped_elems;
2761
2762unmap:
2763	for_each_sg(sglist, s, mapped_elems, i) {
2764		if (s->dma_address)
2765			__unmap_single(domain->priv, s->dma_address,
2766				       s->dma_length, dir);
2767		s->dma_address = s->dma_length = 0;
2768	}
2769
2770	return 0;
2771}
2772
2773/*
2774 * The exported unmap_sg function for dma_ops (handles scatter-gather
2775 * lists).
2776 */
2777static void unmap_sg(struct device *dev, struct scatterlist *sglist,
2778		     int nelems, enum dma_data_direction dir,
2779		     struct dma_attrs *attrs)
2780{
2781	struct protection_domain *domain;
2782	struct scatterlist *s;
2783	int i;
2784
2785	INC_STATS_COUNTER(cnt_unmap_sg);
2786
2787	domain = get_domain(dev);
2788	if (IS_ERR(domain))
2789		return;
2790
2791	for_each_sg(sglist, s, nelems, i) {
2792		__unmap_single(domain->priv, s->dma_address,
2793			       s->dma_length, dir);
2794		s->dma_address = s->dma_length = 0;
2795	}
2796}
2797
2798/*
2799 * The exported alloc_coherent function for dma_ops.
2800 */
2801static void *alloc_coherent(struct device *dev, size_t size,
2802			    dma_addr_t *dma_addr, gfp_t flag,
2803			    struct dma_attrs *attrs)
2804{
2805	u64 dma_mask = dev->coherent_dma_mask;
2806	struct protection_domain *domain;
2807	struct page *page;
2808
2809	INC_STATS_COUNTER(cnt_alloc_coherent);
2810
2811	domain = get_domain(dev);
2812	if (PTR_ERR(domain) == -EINVAL) {
2813		page = alloc_pages(flag, get_order(size));
2814		*dma_addr = page_to_phys(page);
2815		return page_address(page);
2816	} else if (IS_ERR(domain))
2817		return NULL;
2818
2819	size	  = PAGE_ALIGN(size);
2820	dma_mask  = dev->coherent_dma_mask;
2821	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2822	flag     |= __GFP_ZERO;
2823
2824	page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
2825	if (!page) {
2826		if (!gfpflags_allow_blocking(flag))
2827			return NULL;
2828
2829		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
2830						 get_order(size));
2831		if (!page)
2832			return NULL;
2833	}
2834
2835	if (!dma_mask)
2836		dma_mask = *dev->dma_mask;
2837
2838	*dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
2839				 size, DMA_BIDIRECTIONAL, true, dma_mask);
2840
2841	if (*dma_addr == DMA_ERROR_CODE)
2842		goto out_free;
2843
2844	return page_address(page);
2845
2846out_free:
2847
2848	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2849		__free_pages(page, get_order(size));
2850
2851	return NULL;
2852}
2853
2854/*
2855 * The exported free_coherent function for dma_ops.
2856 */
2857static void free_coherent(struct device *dev, size_t size,
2858			  void *virt_addr, dma_addr_t dma_addr,
2859			  struct dma_attrs *attrs)
2860{
2861	struct protection_domain *domain;
2862	struct page *page;
2863
2864	INC_STATS_COUNTER(cnt_free_coherent);
2865
2866	page = virt_to_page(virt_addr);
2867	size = PAGE_ALIGN(size);
2868
2869	domain = get_domain(dev);
2870	if (IS_ERR(domain))
2871		goto free_mem;
2872
2873	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
2874
2875free_mem:
2876	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2877		__free_pages(page, get_order(size));
2878}
2879
2880/*
2881 * This function is called by the DMA layer to find out if we can handle a
2882 * particular device. It is part of the dma_ops.
2883 */
2884static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2885{
2886	return check_device(dev);
2887}
2888
2889static int set_dma_mask(struct device *dev, u64 mask)
2890{
2891	struct protection_domain *domain;
2892	int max_apertures = 1;
2893
2894	domain = get_domain(dev);
2895	if (IS_ERR(domain))
2896		return PTR_ERR(domain);
2897
2898	if (mask == DMA_BIT_MASK(64))
2899		max_apertures = 8;
2900	else if (mask > DMA_BIT_MASK(32))
2901		max_apertures = 4;
2902
2903	/*
2904	 * To prevent lock contention there is no point in allocating
2905	 * more apertures than there are online CPUs.
2906	 */
2907	if (max_apertures > num_online_cpus())
2908		max_apertures = num_online_cpus();
2909
2910	if (dma_ops_domain_alloc_apertures(domain->priv, max_apertures))
2911		dev_err(dev, "Can't allocate %d iommu apertures\n",
2912			max_apertures);
2913
2914	return 0;
2915}
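/*
 * Illustration only: on a 4-CPU machine a device setting a 64-bit DMA
 * mask would ask for 8 apertures above but is capped to 4 by the
 * num_online_cpus() check; a device with a 32-bit mask stays at the
 * single default aperture.
 */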
2916
2917static struct dma_map_ops amd_iommu_dma_ops = {
2918	.alloc		= alloc_coherent,
2919	.free		= free_coherent,
2920	.map_page	= map_page,
2921	.unmap_page	= unmap_page,
2922	.map_sg		= map_sg,
2923	.unmap_sg	= unmap_sg,
2924	.dma_supported	= amd_iommu_dma_supported,
2925	.set_dma_mask	= set_dma_mask,
2926};
2927
2928int __init amd_iommu_init_api(void)
2929{
2930	return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
2931}
2932
2933int __init amd_iommu_init_dma_ops(void)
2934{
2935	swiotlb        = iommu_pass_through ? 1 : 0;
2936	iommu_detected = 1;
2937
2938	/*
2939	 * In case we don't initialize SWIOTLB (actually the common case
2940	 * when AMD IOMMU is enabled), make sure there are global
2941	 * dma_ops set as a fall-back for devices not handled by this
2942	 * driver (for example non-PCI devices).
2943	 */
2944	if (!swiotlb)
2945		dma_ops = &nommu_dma_ops;
2946
2947	amd_iommu_stats_init();
2948
2949	if (amd_iommu_unmap_flush)
2950		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
2951	else
2952		pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
2953
2954	return 0;
2955}
2956
2957/*****************************************************************************
2958 *
2959 * The following functions belong to the exported interface of AMD IOMMU
2960 *
2961 * This interface allows access to lower level functions of the IOMMU
2962 * like protection domain handling and assignment of devices to domains
2963 * which is not possible with the dma_ops interface.
2964 *
2965 *****************************************************************************/
2966
2967static void cleanup_domain(struct protection_domain *domain)
2968{
2969	struct iommu_dev_data *entry;
2970	unsigned long flags;
2971
2972	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2973
2974	while (!list_empty(&domain->dev_list)) {
2975		entry = list_first_entry(&domain->dev_list,
2976					 struct iommu_dev_data, list);
2977		__detach_device(entry);
2978	}
2979
2980	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2981}
2982
2983static void protection_domain_free(struct protection_domain *domain)
2984{
2985	if (!domain)
2986		return;
2987
2988	del_domain_from_list(domain);
2989
2990	if (domain->id)
2991		domain_id_free(domain->id);
2992
2993	kfree(domain);
2994}
2995
2996static int protection_domain_init(struct protection_domain *domain)
2997{
2998	spin_lock_init(&domain->lock);
2999	mutex_init(&domain->api_lock);
3000	domain->id = domain_id_alloc();
3001	if (!domain->id)
3002		return -ENOMEM;
3003	INIT_LIST_HEAD(&domain->dev_list);
3004
3005	return 0;
3006}
3007
3008static struct protection_domain *protection_domain_alloc(void)
3009{
3010	struct protection_domain *domain;
3011
3012	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
3013	if (!domain)
3014		return NULL;
3015
3016	if (protection_domain_init(domain))
3017		goto out_err;
3018
3019	add_domain_to_list(domain);
3020
3021	return domain;
3022
3023out_err:
3024	kfree(domain);
3025
3026	return NULL;
3027}
3028
3029static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
3030{
3031	struct protection_domain *pdomain;
3032	struct dma_ops_domain *dma_domain;
3033
3034	switch (type) {
3035	case IOMMU_DOMAIN_UNMANAGED:
3036		pdomain = protection_domain_alloc();
3037		if (!pdomain)
3038			return NULL;
3039
3040		pdomain->mode    = PAGE_MODE_3_LEVEL;
3041		pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
3042		if (!pdomain->pt_root) {
3043			protection_domain_free(pdomain);
3044			return NULL;
3045		}
3046
3047		pdomain->domain.geometry.aperture_start = 0;
3048		pdomain->domain.geometry.aperture_end   = ~0ULL;
3049		pdomain->domain.geometry.force_aperture = true;
3050
3051		break;
3052	case IOMMU_DOMAIN_DMA:
3053		dma_domain = dma_ops_domain_alloc();
3054		if (!dma_domain) {
3055		pr_err("AMD-Vi: Failed to allocate DMA domain\n");
3056			return NULL;
3057		}
3058		pdomain = &dma_domain->domain;
3059		break;
3060	case IOMMU_DOMAIN_IDENTITY:
3061		pdomain = protection_domain_alloc();
3062		if (!pdomain)
3063			return NULL;
3064
3065		pdomain->mode = PAGE_MODE_NONE;
3066		break;
3067	default:
3068		return NULL;
3069	}
3070
3071	return &pdomain->domain;
3072}
3073
3074static void amd_iommu_domain_free(struct iommu_domain *dom)
3075{
3076	struct protection_domain *domain;
3077
3078	if (!dom)
3079		return;
3080
3081	domain = to_pdomain(dom);
3082
3083	if (domain->dev_cnt > 0)
3084		cleanup_domain(domain);
3085
3086	BUG_ON(domain->dev_cnt != 0);
3087
3088	if (domain->mode != PAGE_MODE_NONE)
3089		free_pagetable(domain);
3090
3091	if (domain->flags & PD_IOMMUV2_MASK)
3092		free_gcr3_table(domain);
3093
3094	protection_domain_free(domain);
3095}
3096
3097static void amd_iommu_detach_device(struct iommu_domain *dom,
3098				    struct device *dev)
3099{
3100	struct iommu_dev_data *dev_data = dev->archdata.iommu;
3101	struct amd_iommu *iommu;
3102	u16 devid;
3103
3104	if (!check_device(dev))
3105		return;
3106
3107	devid = get_device_id(dev);
3108
3109	if (dev_data->domain != NULL)
3110		detach_device(dev);
3111
3112	iommu = amd_iommu_rlookup_table[devid];
3113	if (!iommu)
3114		return;
3115
3116	iommu_completion_wait(iommu);
3117}
3118
3119static int amd_iommu_attach_device(struct iommu_domain *dom,
3120				   struct device *dev)
3121{
3122	struct protection_domain *domain = to_pdomain(dom);
3123	struct iommu_dev_data *dev_data;
3124	struct amd_iommu *iommu;
3125	int ret;
3126
3127	if (!check_device(dev))
3128		return -EINVAL;
3129
3130	dev_data = dev->archdata.iommu;
3131
3132	iommu = amd_iommu_rlookup_table[dev_data->devid];
3133	if (!iommu)
3134		return -EINVAL;
3135
3136	if (dev_data->domain)
3137		detach_device(dev);
3138
3139	ret = attach_device(dev, domain);
3140
3141	iommu_completion_wait(iommu);
3142
3143	return ret;
3144}
3145
3146static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
3147			 phys_addr_t paddr, size_t page_size, int iommu_prot)
3148{
3149	struct protection_domain *domain = to_pdomain(dom);
3150	int prot = 0;
3151	int ret;
3152
3153	if (domain->mode == PAGE_MODE_NONE)
3154		return -EINVAL;
3155
3156	if (iommu_prot & IOMMU_READ)
3157		prot |= IOMMU_PROT_IR;
3158	if (iommu_prot & IOMMU_WRITE)
3159		prot |= IOMMU_PROT_IW;
3160
3161	mutex_lock(&domain->api_lock);
3162	ret = iommu_map_page(domain, iova, paddr, prot, page_size);
3163	mutex_unlock(&domain->api_lock);
3164
3165	return ret;
3166}
3167
3168static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3169			   size_t page_size)
3170{
3171	struct protection_domain *domain = to_pdomain(dom);
3172	size_t unmap_size;
3173
3174	if (domain->mode == PAGE_MODE_NONE)
3175		return -EINVAL;
3176
3177	mutex_lock(&domain->api_lock);
3178	unmap_size = iommu_unmap_page(domain, iova, page_size);
3179	mutex_unlock(&domain->api_lock);
3180
3181	domain_flush_tlb_pde(domain);
3182
3183	return unmap_size;
3184}
3185
3186static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
3187					  dma_addr_t iova)
3188{
3189	struct protection_domain *domain = to_pdomain(dom);
3190	unsigned long offset_mask, pte_pgsize;
3191	u64 *pte, __pte;
3192
3193	if (domain->mode == PAGE_MODE_NONE)
3194		return iova;
3195
3196	pte = fetch_pte(domain, iova, &pte_pgsize);
3197
3198	if (!pte || !IOMMU_PTE_PRESENT(*pte))
3199		return 0;
3200
3201	offset_mask = pte_pgsize - 1;
3202	__pte	    = *pte & PM_ADDR_MASK;
3203
3204	return (__pte & ~offset_mask) | (iova & offset_mask);
3205}
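/*
 * Worked example (illustration only, addresses made up): if iova
 * 0x12345678 is covered by a 2M PTE, fetch_pte() returns
 * pte_pgsize = 0x200000 and offset_mask = 0x1fffff. With the PTE
 * pointing at physical 0x80000000, the function returns
 * (0x80000000 & ~0x1fffff) | (0x12345678 & 0x1fffff) = 0x80145678.
 */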
3206
3207static bool amd_iommu_capable(enum iommu_cap cap)
3208{
3209	switch (cap) {
3210	case IOMMU_CAP_CACHE_COHERENCY:
3211		return true;
3212	case IOMMU_CAP_INTR_REMAP:
3213		return (irq_remapping_enabled == 1);
3214	case IOMMU_CAP_NOEXEC:
3215		return false;
3216	}
3217
3218	return false;
3219}
3220
3221static void amd_iommu_get_dm_regions(struct device *dev,
3222				     struct list_head *head)
3223{
3224	struct unity_map_entry *entry;
3225	u16 devid;
3226
3227	devid = get_device_id(dev);
3228
3229	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
3230		struct iommu_dm_region *region;
3231
3232		if (devid < entry->devid_start || devid > entry->devid_end)
3233			continue;
3234
3235		region = kzalloc(sizeof(*region), GFP_KERNEL);
3236		if (!region) {
3237			pr_err("Out of memory allocating dm-regions for %s\n",
3238				dev_name(dev));
3239			return;
3240		}
3241
3242		region->start = entry->address_start;
3243		region->length = entry->address_end - entry->address_start;
3244		if (entry->prot & IOMMU_PROT_IR)
3245			region->prot |= IOMMU_READ;
3246		if (entry->prot & IOMMU_PROT_IW)
3247			region->prot |= IOMMU_WRITE;
3248
3249		list_add_tail(&region->list, head);
3250	}
3251}
3252
3253static void amd_iommu_put_dm_regions(struct device *dev,
3254				     struct list_head *head)
3255{
3256	struct iommu_dm_region *entry, *next;
3257
3258	list_for_each_entry_safe(entry, next, head, list)
3259		kfree(entry);
3260}
3261
3262static const struct iommu_ops amd_iommu_ops = {
3263	.capable = amd_iommu_capable,
3264	.domain_alloc = amd_iommu_domain_alloc,
3265	.domain_free  = amd_iommu_domain_free,
3266	.attach_dev = amd_iommu_attach_device,
3267	.detach_dev = amd_iommu_detach_device,
3268	.map = amd_iommu_map,
3269	.unmap = amd_iommu_unmap,
3270	.map_sg = default_iommu_map_sg,
3271	.iova_to_phys = amd_iommu_iova_to_phys,
3272	.add_device = amd_iommu_add_device,
3273	.remove_device = amd_iommu_remove_device,
3274	.device_group = pci_device_group,
3275	.get_dm_regions = amd_iommu_get_dm_regions,
3276	.put_dm_regions = amd_iommu_put_dm_regions,
3277	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
3278};
3279
3280/*****************************************************************************
3281 *
3282 * The next functions do a basic initialization of IOMMU for pass through
3283 * mode
3284 *
3285 * In passthrough mode the IOMMU is initialized and enabled but not used for
3286 * DMA-API translation.
3287 *
3288 *****************************************************************************/
3289
3290/* IOMMUv2 specific functions */
3291int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3292{
3293	return atomic_notifier_chain_register(&ppr_notifier, nb);
3294}
3295EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
3296
3297int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
3298{
3299	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
3300}
3301EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
3302
3303void amd_iommu_domain_direct_map(struct iommu_domain *dom)
3304{
3305	struct protection_domain *domain = to_pdomain(dom);
3306	unsigned long flags;
3307
3308	spin_lock_irqsave(&domain->lock, flags);
3309
3310	/* Update data structure */
3311	domain->mode    = PAGE_MODE_NONE;
3312	domain->updated = true;
3313
3314	/* Make changes visible to IOMMUs */
3315	update_domain(domain);
3316
3317	/* Page-table is not visible to IOMMU anymore, so free it */
3318	free_pagetable(domain);
3319
3320	spin_unlock_irqrestore(&domain->lock, flags);
3321}
3322EXPORT_SYMBOL(amd_iommu_domain_direct_map);
3323
3324int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
3325{
3326	struct protection_domain *domain = to_pdomain(dom);
3327	unsigned long flags;
3328	int levels, ret;
3329
3330	if (pasids <= 0 || pasids > (PASID_MASK + 1))
3331		return -EINVAL;
3332
3333	/* Number of GCR3 table levels required */
3334	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
3335		levels += 1;
3336
3337	if (levels > amd_iommu_max_glx_val)
3338		return -EINVAL;
3339
3340	spin_lock_irqsave(&domain->lock, flags);
3341
3342	/*
3343	 * Spare ourselves the sanity checks of whether devices already
3344	 * in the domain support IOMMUv2 by forcing the domain to have
3345	 * no devices attached when it is switched into IOMMUv2 mode.
3346	 */
3347	ret = -EBUSY;
3348	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
3349		goto out;
3350
3351	ret = -ENOMEM;
3352	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
3353	if (domain->gcr3_tbl == NULL)
3354		goto out;
3355
3356	domain->glx      = levels;
3357	domain->flags   |= PD_IOMMUV2_MASK;
3358	domain->updated  = true;
3359
3360	update_domain(domain);
3361
3362	ret = 0;
3363
3364out:
3365	spin_unlock_irqrestore(&domain->lock, flags);
3366
3367	return ret;
3368}
3369EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
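/*
 * Worked example for the GCR3 level computation above (illustration
 * only): each level resolves 9 PASID bits. For pasids = 1 << 16 the
 * loop body runs once ((0xffff & ~0x1ff) != 0, then pasids becomes
 * 0x80 and the test fails), so levels = 1, i.e. a two-level GCR3
 * table. For pasids <= 512 the loop never runs and levels stays 0.
 */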
3370
3371static int __flush_pasid(struct protection_domain *domain, int pasid,
3372			 u64 address, bool size)
3373{
3374	struct iommu_dev_data *dev_data;
3375	struct iommu_cmd cmd;
3376	int i, ret;
3377
3378	if (!(domain->flags & PD_IOMMUV2_MASK))
3379		return -EINVAL;
3380
3381	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
3382
3383	/*
3384	 * IOMMU TLB needs to be flushed before Device TLB to
3385	 * prevent device TLB refill from IOMMU TLB
3386	 */
3387	for (i = 0; i < amd_iommus_present; ++i) {
3388		if (domain->dev_iommu[i] == 0)
3389			continue;
3390
3391		ret = iommu_queue_command(amd_iommus[i], &cmd);
3392		if (ret != 0)
3393			goto out;
3394	}
3395
3396	/* Wait until IOMMU TLB flushes are complete */
3397	domain_flush_complete(domain);
3398
3399	/* Now flush device TLBs */
3400	list_for_each_entry(dev_data, &domain->dev_list, list) {
3401		struct amd_iommu *iommu;
3402		int qdep;
3403
3404		/*
3405		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
3406		 * domain.
3407		 */
3408		if (!dev_data->ats.enabled)
3409			continue;
3410
3411		qdep  = dev_data->ats.qdep;
3412		iommu = amd_iommu_rlookup_table[dev_data->devid];
3413
3414		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3415				      qdep, address, size);
3416
3417		ret = iommu_queue_command(iommu, &cmd);
3418		if (ret != 0)
3419			goto out;
3420	}
3421
3422	/* Wait until all device TLBs are flushed */
3423	domain_flush_complete(domain);
3424
3425	ret = 0;
3426
3427out:
3428
3429	return ret;
3430}
3431
3432static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
3433				  u64 address)
3434{
3435	INC_STATS_COUNTER(invalidate_iotlb);
3436
3437	return __flush_pasid(domain, pasid, address, false);
3438}
3439
3440int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
3441			 u64 address)
3442{
3443	struct protection_domain *domain = to_pdomain(dom);
3444	unsigned long flags;
3445	int ret;
3446
3447	spin_lock_irqsave(&domain->lock, flags);
3448	ret = __amd_iommu_flush_page(domain, pasid, address);
3449	spin_unlock_irqrestore(&domain->lock, flags);
3450
3451	return ret;
3452}
3453EXPORT_SYMBOL(amd_iommu_flush_page);
3454
3455static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
3456{
3457	INC_STATS_COUNTER(invalidate_iotlb_all);
3458
3459	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
3460			     true);
3461}
3462
3463int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
3464{
3465	struct protection_domain *domain = to_pdomain(dom);
3466	unsigned long flags;
3467	int ret;
3468
3469	spin_lock_irqsave(&domain->lock, flags);
3470	ret = __amd_iommu_flush_tlb(domain, pasid);
3471	spin_unlock_irqrestore(&domain->lock, flags);
3472
3473	return ret;
3474}
3475EXPORT_SYMBOL(amd_iommu_flush_tlb);
3476
3477static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
3478{
3479	int index;
3480	u64 *pte;
3481
3482	while (true) {
3483
3484		index = (pasid >> (9 * level)) & 0x1ff;
3485		pte   = &root[index];
3486
3487		if (level == 0)
3488			break;
3489
3490		if (!(*pte & GCR3_VALID)) {
3491			if (!alloc)
3492				return NULL;
3493
3494			root = (void *)get_zeroed_page(GFP_ATOMIC);
3495			if (root == NULL)
3496				return NULL;
3497
3498			*pte = __pa(root) | GCR3_VALID;
3499		}
3500
3501		root = __va(*pte & PAGE_MASK);
3502
3503		level -= 1;
3504	}
3505
3506	return pte;
3507}
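/*
 * Worked example (illustration only): for pasid 0x12345 in a
 * two-level table (glx = 1) the walk above uses index
 * (0x12345 >> 9) & 0x1ff = 0x91 at level 1 and 0x12345 & 0x1ff =
 * 0x145 at level 0.
 */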
3508
3509static int __set_gcr3(struct protection_domain *domain, int pasid,
3510		      unsigned long cr3)
3511{
3512	u64 *pte;
3513
3514	if (domain->mode != PAGE_MODE_NONE)
3515		return -EINVAL;
3516
3517	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
3518	if (pte == NULL)
3519		return -ENOMEM;
3520
3521	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;
3522
3523	return __amd_iommu_flush_tlb(domain, pasid);
3524}
3525
3526static int __clear_gcr3(struct protection_domain *domain, int pasid)
3527{
3528	u64 *pte;
3529
3530	if (domain->mode != PAGE_MODE_NONE)
3531		return -EINVAL;
3532
3533	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
3534	if (pte == NULL)
3535		return 0;
3536
3537	*pte = 0;
3538
3539	return __amd_iommu_flush_tlb(domain, pasid);
3540}
3541
3542int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
3543			      unsigned long cr3)
3544{
3545	struct protection_domain *domain = to_pdomain(dom);
3546	unsigned long flags;
3547	int ret;
3548
3549	spin_lock_irqsave(&domain->lock, flags);
3550	ret = __set_gcr3(domain, pasid, cr3);
3551	spin_unlock_irqrestore(&domain->lock, flags);
3552
3553	return ret;
3554}
3555EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
3556
3557int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
3558{
3559	struct protection_domain *domain = to_pdomain(dom);
3560	unsigned long flags;
3561	int ret;
3562
3563	spin_lock_irqsave(&domain->lock, flags);
3564	ret = __clear_gcr3(domain, pasid);
3565	spin_unlock_irqrestore(&domain->lock, flags);
3566
3567	return ret;
3568}
3569EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
3570
3571int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
3572			   int status, int tag)
3573{
3574	struct iommu_dev_data *dev_data;
3575	struct amd_iommu *iommu;
3576	struct iommu_cmd cmd;
3577
3578	INC_STATS_COUNTER(complete_ppr);
3579
3580	dev_data = get_dev_data(&pdev->dev);
3581	iommu    = amd_iommu_rlookup_table[dev_data->devid];
3582
3583	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
3584			   tag, dev_data->pri_tlp);
3585
3586	return iommu_queue_command(iommu, &cmd);
3587}
3588EXPORT_SYMBOL(amd_iommu_complete_ppr);
3589
3590struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
3591{
3592	struct protection_domain *pdomain;
3593
3594	pdomain = get_domain(&pdev->dev);
3595	if (IS_ERR(pdomain))
3596		return NULL;
3597
3598	/* Only return IOMMUv2 domains */
3599	if (!(pdomain->flags & PD_IOMMUV2_MASK))
3600		return NULL;
3601
3602	return &pdomain->domain;
3603}
3604EXPORT_SYMBOL(amd_iommu_get_v2_domain);
3605
3606void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
3607{
3608	struct iommu_dev_data *dev_data;
3609
3610	if (!amd_iommu_v2_supported())
3611		return;
3612
3613	dev_data = get_dev_data(&pdev->dev);
3614	dev_data->errata |= (1 << erratum);
3615}
3616EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
3617
3618int amd_iommu_device_info(struct pci_dev *pdev,
3619                          struct amd_iommu_device_info *info)
3620{
3621	int max_pasids;
3622	int pos;
3623
3624	if (pdev == NULL || info == NULL)
3625		return -EINVAL;
3626
3627	if (!amd_iommu_v2_supported())
3628		return -EINVAL;
3629
3630	memset(info, 0, sizeof(*info));
3631
3632	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3633	if (pos)
3634		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3635
3636	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3637	if (pos)
3638		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
3639
3640	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
3641	if (pos) {
3642		int features;
3643
3644		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
3645		max_pasids = min(max_pasids, (1 << 20));
3646
3647		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
3648		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
3649
3650		features = pci_pasid_features(pdev);
3651		if (features & PCI_PASID_CAP_EXEC)
3652			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
3653		if (features & PCI_PASID_CAP_PRIV)
3654			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
3655	}
3656
3657	return 0;
3658}
3659EXPORT_SYMBOL(amd_iommu_device_info);
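/*
 * Worked example (illustration only): with amd_iommu_max_glx_val = 1
 * the IOMMU-side limit computed above is 1 << (9 * 2) = 262144
 * PASIDs, already below the 1 << 20 architectural cap, so
 * info->max_pasids becomes min(pci_max_pasids(pdev), 262144).
 */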
3660
3661#ifdef CONFIG_IRQ_REMAP
3662
3663/*****************************************************************************
3664 *
3665 * Interrupt Remapping Implementation
3666 *
3667 *****************************************************************************/
3668
3669union irte {
3670	u32 val;
3671	struct {
3672		u32 valid	: 1,
3673		    no_fault	: 1,
3674		    int_type	: 3,
3675		    rq_eoi	: 1,
3676		    dm		: 1,
3677		    rsvd_1	: 1,
3678		    destination	: 8,
3679		    vector	: 8,
3680		    rsvd_2	: 8;
3681	} fields;
3682};
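/*
 * Illustration only: with the LSB-first bitfield layout used on x86,
 * valid is bit 0, the destination APIC ID occupies bits 8-15 and the
 * vector bits 16-23. An IRTE for vector 0x31 aimed at APIC ID 2 with
 * all other fields zero therefore has
 * val == (0x31 << 16) | (2 << 8) | 1 == 0x00310201.
 */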
3683
3684struct irq_2_irte {
3685	u16 devid; /* Device ID for IRTE table */
3686	u16 index; /* Index into IRTE table */
3687};
3688
3689struct amd_ir_data {
3690	struct irq_2_irte			irq_2_irte;
3691	union irte				irte_entry;
3692	union {
3693		struct msi_msg			msi_entry;
3694	};
3695};
3696
3697static struct irq_chip amd_ir_chip;
3698
3699#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
3700#define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
3701#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
3702#define DTE_IRQ_REMAP_ENABLE    1ULL
3703
3704static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3705{
3706	u64 dte;
3707
3708	dte	= amd_iommu_dev_table[devid].data[2];
3709	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
3710	dte	|= virt_to_phys(table->table);
3711	dte	|= DTE_IRQ_REMAP_INTCTL;
3712	dte	|= DTE_IRQ_TABLE_LEN;
3713	dte	|= DTE_IRQ_REMAP_ENABLE;
3714
3715	amd_iommu_dev_table[devid].data[2] = dte;
3716}
3717
3718#define IRTE_ALLOCATED (~1U)
3719
3720static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3721{
3722	struct irq_remap_table *table = NULL;
3723	struct amd_iommu *iommu;
3724	unsigned long flags;
3725	u16 alias;
3726
3727	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3728
3729	iommu = amd_iommu_rlookup_table[devid];
3730	if (!iommu)
3731		goto out_unlock;
3732
3733	table = irq_lookup_table[devid];
3734	if (table)
3735		goto out;
3736
3737	alias = amd_iommu_alias_table[devid];
3738	table = irq_lookup_table[alias];
3739	if (table) {
3740		irq_lookup_table[devid] = table;
3741		set_dte_irq_entry(devid, table);
3742		iommu_flush_dte(iommu, devid);
3743		goto out;
3744	}
3745
3746	/* Nothing there yet, allocate new irq remapping table */
3747	table = kzalloc(sizeof(*table), GFP_ATOMIC);
3748	if (!table)
3749		goto out;
3750
3751	/* Initialize table spin-lock */
3752	spin_lock_init(&table->lock);
3753
3754	if (ioapic)
3755		/* Keep the first 32 indexes free for IOAPIC interrupts */
3756		table->min_index = 32;
3757
3758	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
3759	if (!table->table) {
3760		kfree(table);
3761		table = NULL;
3762		goto out;
3763	}
3764
3765	memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
3766
3767	if (ioapic) {
3768		int i;
3769
3770		for (i = 0; i < 32; ++i)
3771			table->table[i] = IRTE_ALLOCATED;
3772	}
3773
3774	irq_lookup_table[devid] = table;
3775	set_dte_irq_entry(devid, table);
3776	iommu_flush_dte(iommu, devid);
3777	if (devid != alias) {
3778		irq_lookup_table[alias] = table;
3779		set_dte_irq_entry(alias, table);
3780		iommu_flush_dte(iommu, alias);
3781	}
3782
3783out:
3784	iommu_completion_wait(iommu);
3785
3786out_unlock:
3787	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3788
3789	return table;
3790}
3791
3792static int alloc_irq_index(u16 devid, int count)
3793{
3794	struct irq_remap_table *table;
3795	unsigned long flags;
3796	int index, c;
3797
3798	table = get_irq_table(devid, false);
3799	if (!table)
3800		return -ENODEV;
3801
3802	spin_lock_irqsave(&table->lock, flags);
3803
3804	/* Scan table for free entries */
3805	for (c = 0, index = table->min_index;
3806	     index < MAX_IRQS_PER_TABLE;
3807	     ++index) {
3808		if (table->table[index] == 0)
3809			c += 1;
3810		else
3811			c = 0;
3812
3813		if (c == count)	{
3814			for (; c != 0; --c)
3815				table->table[index - c + 1] = IRTE_ALLOCATED;
3816
3817			index -= count - 1;
3818			goto out;
3819		}
3820	}
3821
3822	index = -ENOSPC;
3823
3824out:
3825	spin_unlock_irqrestore(&table->lock, flags);
3826
3827	return index;
3828}
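/*
 * Illustration only: with min_index = 32 and entries 32-33 already
 * IRTE_ALLOCATED, a call with count = 2 scans forward until it finds
 * two consecutive free slots, say 34 and 35, marks both
 * IRTE_ALLOCATED and returns the first index, 34.
 */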
3829
3830static int modify_irte(u16 devid, int index, union irte irte)
3831{
3832	struct irq_remap_table *table;
3833	struct amd_iommu *iommu;
3834	unsigned long flags;
3835
3836	iommu = amd_iommu_rlookup_table[devid];
3837	if (iommu == NULL)
3838		return -EINVAL;
3839
3840	table = get_irq_table(devid, false);
3841	if (!table)
3842		return -ENOMEM;
3843
3844	spin_lock_irqsave(&table->lock, flags);
3845	table->table[index] = irte.val;
3846	spin_unlock_irqrestore(&table->lock, flags);
3847
3848	iommu_flush_irt(iommu, devid);
3849	iommu_completion_wait(iommu);
3850
3851	return 0;
3852}
3853
3854static void free_irte(u16 devid, int index)
3855{
3856	struct irq_remap_table *table;
3857	struct amd_iommu *iommu;
3858	unsigned long flags;
3859
3860	iommu = amd_iommu_rlookup_table[devid];
3861	if (iommu == NULL)
3862		return;
3863
3864	table = get_irq_table(devid, false);
3865	if (!table)
3866		return;
3867
3868	spin_lock_irqsave(&table->lock, flags);
3869	table->table[index] = 0;
3870	spin_unlock_irqrestore(&table->lock, flags);
3871
3872	iommu_flush_irt(iommu, devid);
3873	iommu_completion_wait(iommu);
3874}
3875
3876static int get_devid(struct irq_alloc_info *info)
3877{
3878	int devid = -1;
3879
3880	switch (info->type) {
3881	case X86_IRQ_ALLOC_TYPE_IOAPIC:
3882		devid     = get_ioapic_devid(info->ioapic_id);
3883		break;
3884	case X86_IRQ_ALLOC_TYPE_HPET:
3885		devid     = get_hpet_devid(info->hpet_id);
3886		break;
3887	case X86_IRQ_ALLOC_TYPE_MSI:
3888	case X86_IRQ_ALLOC_TYPE_MSIX:
3889		devid = get_device_id(&info->msi_dev->dev);
3890		break;
3891	default:
3892		BUG_ON(1);
3893		break;
3894	}
3895
3896	return devid;
3897}
3898
3899static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
3900{
3901	struct amd_iommu *iommu;
3902	int devid;
3903
3904	if (!info)
3905		return NULL;
3906
3907	devid = get_devid(info);
3908	if (devid >= 0) {
3909		iommu = amd_iommu_rlookup_table[devid];
3910		if (iommu)
3911			return iommu->ir_domain;
3912	}
3913
3914	return NULL;
3915}
3916
3917static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
3918{
3919	struct amd_iommu *iommu;
3920	int devid;
3921
3922	if (!info)
3923		return NULL;
3924
3925	switch (info->type) {
3926	case X86_IRQ_ALLOC_TYPE_MSI:
3927	case X86_IRQ_ALLOC_TYPE_MSIX:
3928		devid = get_device_id(&info->msi_dev->dev);
3929		iommu = amd_iommu_rlookup_table[devid];
3930		if (iommu)
3931			return iommu->msi_domain;
3932		break;
3933	default:
3934		break;
3935	}
3936
3937	return NULL;
3938}
3939
3940struct irq_remap_ops amd_iommu_irq_ops = {
3941	.prepare		= amd_iommu_prepare,
3942	.enable			= amd_iommu_enable,
3943	.disable		= amd_iommu_disable,
3944	.reenable		= amd_iommu_reenable,
3945	.enable_faulting	= amd_iommu_enable_faulting,
3946	.get_ir_irq_domain	= get_ir_irq_domain,
3947	.get_irq_domain		= get_irq_domain,
3948};
3949
3950static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3951				       struct irq_cfg *irq_cfg,
3952				       struct irq_alloc_info *info,
3953				       int devid, int index, int sub_handle)
3954{
3955	struct irq_2_irte *irte_info = &data->irq_2_irte;
3956	struct msi_msg *msg = &data->msi_entry;
3957	union irte *irte = &data->irte_entry;
3958	struct IO_APIC_route_entry *entry;
3959
3960	data->irq_2_irte.devid = devid;
3961	data->irq_2_irte.index = index + sub_handle;
3962
3963	/* Setup IRTE for IOMMU */
3964	irte->val = 0;
3965	irte->fields.vector      = irq_cfg->vector;
3966	irte->fields.int_type    = apic->irq_delivery_mode;
3967	irte->fields.destination = irq_cfg->dest_apicid;
3968	irte->fields.dm          = apic->irq_dest_mode;
3969	irte->fields.valid       = 1;
3970
3971	switch (info->type) {
3972	case X86_IRQ_ALLOC_TYPE_IOAPIC:
3973		/* Setup IOAPIC entry */
3974		entry = info->ioapic_entry;
3975		info->ioapic_entry = NULL;
3976		memset(entry, 0, sizeof(*entry));
3977		entry->vector        = index;
3978		entry->mask          = 0;
3979		entry->trigger       = info->ioapic_trigger;
3980		entry->polarity      = info->ioapic_polarity;
3981		/* Mask level triggered irqs. */
3982		if (info->ioapic_trigger)
3983			entry->mask = 1;
3984		break;
3985
3986	case X86_IRQ_ALLOC_TYPE_HPET:
3987	case X86_IRQ_ALLOC_TYPE_MSI:
3988	case X86_IRQ_ALLOC_TYPE_MSIX:
3989		msg->address_hi = MSI_ADDR_BASE_HI;
3990		msg->address_lo = MSI_ADDR_BASE_LO;
3991		msg->data = irte_info->index;
3992		break;
3993
3994	default:
3995		BUG_ON(1);
3996		break;
3997	}
3998}
3999
4000static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
4001			       unsigned int nr_irqs, void *arg)
4002{
4003	struct irq_alloc_info *info = arg;
4004	struct irq_data *irq_data;
4005	struct amd_ir_data *data;
4006	struct irq_cfg *cfg;
4007	int i, ret, devid;
4008	int index = -1;
4009
4010	if (!info)
4011		return -EINVAL;
4012	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
4013	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
4014		return -EINVAL;
4015
4016	/*
4017	 * With IRQ remapping enabled, don't need contiguous CPU vectors
4018	 * to support multiple MSI interrupts.
4019	 */
4020	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
4021		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
4022
4023	devid = get_devid(info);
4024	if (devid < 0)
4025		return -EINVAL;
4026
4027	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
4028	if (ret < 0)
4029		return ret;
4030
4031	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
4032		if (get_irq_table(devid, true))
4033			index = info->ioapic_pin;
4034		else
4035			ret = -ENOMEM;
4036	} else {
4037		index = alloc_irq_index(devid, nr_irqs);
4038	}
4039	if (index < 0) {
4040		pr_warn("Failed to allocate IRTE\n");
4041		goto out_free_parent;
4042	}
4043
4044	for (i = 0; i < nr_irqs; i++) {
4045		irq_data = irq_domain_get_irq_data(domain, virq + i);
4046		cfg = irqd_cfg(irq_data);
4047		if (!irq_data || !cfg) {
4048			ret = -EINVAL;
4049			goto out_free_data;
4050		}
4051
4052		ret = -ENOMEM;
4053		data = kzalloc(sizeof(*data), GFP_KERNEL);
4054		if (!data)
4055			goto out_free_data;
4056
4057		irq_data->hwirq = (devid << 16) + i;
4058		irq_data->chip_data = data;
4059		irq_data->chip = &amd_ir_chip;
4060		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
4061		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
4062	}
4063
4064	return 0;
4065
4066out_free_data:
4067	for (i--; i >= 0; i--) {
4068		irq_data = irq_domain_get_irq_data(domain, virq + i);
4069		if (irq_data)
4070			kfree(irq_data->chip_data);
4071	}
4072	for (i = 0; i < nr_irqs; i++)
4073		free_irte(devid, index + i);
4074out_free_parent:
4075	irq_domain_free_irqs_common(domain, virq, nr_irqs);
4076	return ret;
4077}
4078
4079static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
4080			       unsigned int nr_irqs)
4081{
4082	struct irq_2_irte *irte_info;
4083	struct irq_data *irq_data;
4084	struct amd_ir_data *data;
4085	int i;
4086
4087	for (i = 0; i < nr_irqs; i++) {
4088		irq_data = irq_domain_get_irq_data(domain, virq  + i);
4089		if (irq_data && irq_data->chip_data) {
4090			data = irq_data->chip_data;
4091			irte_info = &data->irq_2_irte;
4092			free_irte(irte_info->devid, irte_info->index);
4093			kfree(data);
4094		}
4095	}
4096	irq_domain_free_irqs_common(domain, virq, nr_irqs);
4097}
4098
4099static void irq_remapping_activate(struct irq_domain *domain,
4100				   struct irq_data *irq_data)
4101{
4102	struct amd_ir_data *data = irq_data->chip_data;
4103	struct irq_2_irte *irte_info = &data->irq_2_irte;
4104
4105	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
4106}
4107
4108static void irq_remapping_deactivate(struct irq_domain *domain,
4109				     struct irq_data *irq_data)
4110{
4111	struct amd_ir_data *data = irq_data->chip_data;
4112	struct irq_2_irte *irte_info = &data->irq_2_irte;
4113	union irte entry;
4114
4115	entry.val = 0;
4116	modify_irte(irte_info->devid, irte_info->index, entry);
4117}
4118
4119static struct irq_domain_ops amd_ir_domain_ops = {
4120	.alloc = irq_remapping_alloc,
4121	.free = irq_remapping_free,
4122	.activate = irq_remapping_activate,
4123	.deactivate = irq_remapping_deactivate,
4124};
4125
4126static int amd_ir_set_affinity(struct irq_data *data,
4127			       const struct cpumask *mask, bool force)
4128{
4129	struct amd_ir_data *ir_data = data->chip_data;
4130	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
4131	struct irq_cfg *cfg = irqd_cfg(data);
4132	struct irq_data *parent = data->parent_data;
4133	int ret;
4134
4135	ret = parent->chip->irq_set_affinity(parent, mask, force);
4136	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
4137		return ret;
4138
4139	/*
4140	 * Atomically updates the IRTE with the new destination and
4141	 * vector, and flushes the interrupt entry cache.
4142	 */
4143	ir_data->irte_entry.fields.vector = cfg->vector;
4144	ir_data->irte_entry.fields.destination = cfg->dest_apicid;
4145	modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry);
4146
4147	/*
4148	 * After this point, all the interrupts will start arriving
4149	 * at the new destination. So, time to clean up the previous
4150	 * vector allocation.
4151	 */
4152	send_cleanup_vector(cfg);
4153
4154	return IRQ_SET_MASK_OK_DONE;
4155}
4156
4157static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
4158{
4159	struct amd_ir_data *ir_data = irq_data->chip_data;
4160
4161	*msg = ir_data->msi_entry;
4162}
4163
4164static struct irq_chip amd_ir_chip = {
4165	.irq_ack = ir_ack_apic_edge,
4166	.irq_set_affinity = amd_ir_set_affinity,
4167	.irq_compose_msi_msg = ir_compose_msi_msg,
4168};
4169
4170int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
4171{
4172	iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu);
4173	if (!iommu->ir_domain)
4174		return -ENOMEM;
4175
4176	iommu->ir_domain->parent = arch_get_ir_parent_domain();
4177	iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
4178
4179	return 0;
4180}
4181#endif
v3.5.6
   1/*
   2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
   3 * Author: Joerg Roedel <joerg.roedel@amd.com>
   4 *         Leo Duran <leo.duran@amd.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  18 */
  19
  20#include <linux/ratelimit.h>
  21#include <linux/pci.h>
  22#include <linux/pci-ats.h>
  23#include <linux/bitmap.h>
  24#include <linux/slab.h>
  25#include <linux/debugfs.h>
  26#include <linux/scatterlist.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/iommu-helper.h>
  29#include <linux/iommu.h>
  30#include <linux/delay.h>
  31#include <linux/amd-iommu.h>
  32#include <linux/notifier.h>
  33#include <linux/export.h>
  34#include <asm/msidef.h>
  35#include <asm/proto.h>
  36#include <asm/iommu.h>
  37#include <asm/gart.h>
  38#include <asm/dma.h>
  39
  40#include "amd_iommu_proto.h"
  41#include "amd_iommu_types.h"
  42
  43#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
  44
  45#define LOOP_TIMEOUT	100000
  46
  47/*
   48 * This bitmap is used to advertise the page sizes our hardware supports
  49 * to the IOMMU core, which will then use this information to split
  50 * physically contiguous memory regions it is mapping into page sizes
  51 * that we support.
  52 *
  53 * Traditionally the IOMMU core just handed us the mappings directly,
  54 * after making sure the size is an order of a 4KiB page and that the
  55 * mapping has natural alignment.
  56 *
  57 * To retain this behavior, we currently advertise that we support
  58 * all page sizes that are an order of 4KiB.
  59 *
  60 * If at some point we'd like to utilize the IOMMU core's new behavior,
  61 * we could change this to advertise the real page sizes we support.
  62 */
  63#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
  64
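In the pgsize bitmap convention used by the IOMMU core, a set bit k advertises support for a page size of 2^k bytes, so ~0xFFFUL sets every bit from 12 upward and advertises all power-of-two sizes of 4KiB and above. A self-contained sketch of how a core-style splitter consumes such a bitmap (illustrative only, not the IOMMU core's actual code):

/* Sketch: choose the largest advertised page size that fits an
 * aligned region, given a bitmap like AMD_IOMMU_PGSIZES. */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_pick_pgsize(uint64_t bitmap, uint64_t iova, uint64_t size)
{
	int k;

	for (k = 63; k >= 12; k--) {
		uint64_t pgsize = 1ULL << k;

		if (!(bitmap & pgsize))
			continue;	/* size not advertised */
		if (iova & (pgsize - 1))
			continue;	/* iova not aligned to it */
		if (size < pgsize)
			continue;	/* region too small */
		return pgsize;
	}
	return 0;
}

int main(void)
{
	uint64_t pgsizes = ~0xFFFULL;	/* all orders of 4KiB */

	/* 2MiB-aligned 2MiB region: picks 0x200000 */
	printf("%#llx\n", (unsigned long long)demo_pick_pgsize(pgsizes, 0x200000, 0x200000));
	/* same length but misaligned: falls back to 0x1000 */
	printf("%#llx\n", (unsigned long long)demo_pick_pgsize(pgsizes, 0x201000, 0x200000));
	return 0;
}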
  65static DEFINE_RWLOCK(amd_iommu_devtable_lock);
  66
  67/* A list of preallocated protection domains */
  68static LIST_HEAD(iommu_pd_list);
  69static DEFINE_SPINLOCK(iommu_pd_list_lock);
  70
  71/* List of all available dev_data structures */
  72static LIST_HEAD(dev_data_list);
  73static DEFINE_SPINLOCK(dev_data_list_lock);
  74
  75/*
  76 * Domain for untranslated devices - only allocated
  77 * if iommu=pt passed on kernel cmd line.
  78 */
  79static struct protection_domain *pt_domain;
  80
  81static struct iommu_ops amd_iommu_ops;
  82
  83static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
  84int amd_iommu_max_glx_val = -1;
  85
  86static struct dma_map_ops amd_iommu_dma_ops;
  87
  88/*
   89 * general struct to manage commands sent to an IOMMU
  90 */
  91struct iommu_cmd {
  92	u32 data[4];
  93};
  94
  95static void update_domain(struct protection_domain *domain);
  96static int __init alloc_passthrough_domain(void);
  97
  98/****************************************************************************
  99 *
 100 * Helper functions
 101 *
 102 ****************************************************************************/
 103
 104static struct iommu_dev_data *alloc_dev_data(u16 devid)
 105{
 106	struct iommu_dev_data *dev_data;
 107	unsigned long flags;
 108
 109	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
 110	if (!dev_data)
 111		return NULL;
 112
 113	dev_data->devid = devid;
 114	atomic_set(&dev_data->bind, 0);
 115
 116	spin_lock_irqsave(&dev_data_list_lock, flags);
 117	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
 118	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 119
 120	return dev_data;
 121}
 122
 123static void free_dev_data(struct iommu_dev_data *dev_data)
 124{
 125	unsigned long flags;
 126
 127	spin_lock_irqsave(&dev_data_list_lock, flags);
 128	list_del(&dev_data->dev_data_list);
 129	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 130
 131	kfree(dev_data);
 132}
 133
 134static struct iommu_dev_data *search_dev_data(u16 devid)
 135{
 136	struct iommu_dev_data *dev_data;
 137	unsigned long flags;
 138
 139	spin_lock_irqsave(&dev_data_list_lock, flags);
 140	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
 141		if (dev_data->devid == devid)
 142			goto out_unlock;
 143	}
 144
 145	dev_data = NULL;
 146
 147out_unlock:
 148	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 149
 150	return dev_data;
 151}
 152
 153static struct iommu_dev_data *find_dev_data(u16 devid)
 154{
 155	struct iommu_dev_data *dev_data;
 156
 157	dev_data = search_dev_data(devid);
 158
 159	if (dev_data == NULL)
 160		dev_data = alloc_dev_data(devid);
 161
 162	return dev_data;
 163}
 164
 165static inline u16 get_device_id(struct device *dev)
 166{
 167	struct pci_dev *pdev = to_pci_dev(dev);
 168
 169	return calc_devid(pdev->bus->number, pdev->devfn);
 170}
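calc_devid() packs the bus number and devfn into the 16-bit device ID that indexes the IOMMU tables: devid = (bus << 8) | devfn, where devfn itself carries slot[7:3] and function[2:0]. This is the layout the PCI_BUS/PCI_SLOT/PCI_FUNC extractions in the event-log code further down rely on; a sketch of the round trip (demo_* names are illustrative):

/* Sketch: 16-bit IOMMU device ID layout, bus[15:8] | devfn[7:0]. */
#include <assert.h>
#include <stdint.h>

static inline uint16_t demo_calc_devid(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
	uint8_t devfn  = (0x1f << 3) | 2;		/* slot 0x1f, function 2 */
	uint16_t devid = demo_calc_devid(0x03, devfn);	/* device 03:1f.2 */

	assert((devid >> 8) == 0x03);			/* bus      */
	assert(((devid >> 3) & 0x1f) == 0x1f);		/* slot     */
	assert((devid & 0x7) == 2);			/* function */
	return 0;
}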
 171
 172static struct iommu_dev_data *get_dev_data(struct device *dev)
 173{
 174	return dev->archdata.iommu;
 175}
 176
 177static bool pci_iommuv2_capable(struct pci_dev *pdev)
 178{
 179	static const int caps[] = {
 180		PCI_EXT_CAP_ID_ATS,
 181		PCI_EXT_CAP_ID_PRI,
 182		PCI_EXT_CAP_ID_PASID,
 183	};
 184	int i, pos;
 185
 186	for (i = 0; i < 3; ++i) {
 187		pos = pci_find_ext_capability(pdev, caps[i]);
 188		if (pos == 0)
 189			return false;
 190	}
 191
 192	return true;
 193}
 194
 195static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
 196{
 197	struct iommu_dev_data *dev_data;
 198
 199	dev_data = get_dev_data(&pdev->dev);
 200
 201	return dev_data->errata & (1 << erratum) ? true : false;
 202}
 203
 204/*
 205 * In this function the list of preallocated protection domains is traversed to
 206 * find the domain for a specific device
 207 */
 208static struct dma_ops_domain *find_protection_domain(u16 devid)
 209{
 210	struct dma_ops_domain *entry, *ret = NULL;
 211	unsigned long flags;
 212	u16 alias = amd_iommu_alias_table[devid];
 213
 214	if (list_empty(&iommu_pd_list))
 215		return NULL;
 216
 217	spin_lock_irqsave(&iommu_pd_list_lock, flags);
 218
 219	list_for_each_entry(entry, &iommu_pd_list, list) {
 220		if (entry->target_dev == devid ||
 221		    entry->target_dev == alias) {
 222			ret = entry;
 223			break;
 224		}
 225	}
 226
 227	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 228
 229	return ret;
 230}
 231
 232/*
 233 * This function checks if the driver got a valid device from the caller to
 234 * avoid dereferencing invalid pointers.
 235 */
 236static bool check_device(struct device *dev)
 237{
 238	u16 devid;
 239
 240	if (!dev || !dev->dma_mask)
 241		return false;
 242
 243	/* No device or no PCI device */
 244	if (dev->bus != &pci_bus_type)
 245		return false;
 246
 247	devid = get_device_id(dev);
 248
 249	/* Out of our scope? */
 250	if (devid > amd_iommu_last_bdf)
 251		return false;
 252
 253	if (amd_iommu_rlookup_table[devid] == NULL)
 254		return false;
 255
 256	return true;
 257}
 258
 259static int iommu_init_device(struct device *dev)
 260{
 261	struct pci_dev *pdev = to_pci_dev(dev);
 262	struct iommu_dev_data *dev_data;
 263	u16 alias;
 264
 265	if (dev->archdata.iommu)
 266		return 0;
 267
 268	dev_data = find_dev_data(get_device_id(dev));
 269	if (!dev_data)
 270		return -ENOMEM;
 271
 272	alias = amd_iommu_alias_table[dev_data->devid];
 273	if (alias != dev_data->devid) {
 274		struct iommu_dev_data *alias_data;
 275
 276		alias_data = find_dev_data(alias);
 277		if (alias_data == NULL) {
 278			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
 279					dev_name(dev));
 280			free_dev_data(dev_data);
 281			return -ENOTSUPP;
 282		}
 283		dev_data->alias_data = alias_data;
 284	}
 285
 286	if (pci_iommuv2_capable(pdev)) {
 287		struct amd_iommu *iommu;
 288
 289		iommu              = amd_iommu_rlookup_table[dev_data->devid];
 290		dev_data->iommu_v2 = iommu->is_iommu_v2;
 291	}
 292
 293	dev->archdata.iommu = dev_data;
 294
 295	return 0;
 296}
 297
 298static void iommu_ignore_device(struct device *dev)
 299{
 300	u16 devid, alias;
 301
 302	devid = get_device_id(dev);
 303	alias = amd_iommu_alias_table[devid];
 304
 305	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 306	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
 307
 308	amd_iommu_rlookup_table[devid] = NULL;
 309	amd_iommu_rlookup_table[alias] = NULL;
 310}
 311
 312static void iommu_uninit_device(struct device *dev)
 313{
 314	/*
 315	 * Nothing to do here - we keep dev_data around for unplugged devices
 316	 * and reuse it when the device is re-plugged - not doing so would
 317	 * introduce a ton of races.
 318	 */
 319}
 320
 321void __init amd_iommu_uninit_devices(void)
 322{
 323	struct iommu_dev_data *dev_data, *n;
 324	struct pci_dev *pdev = NULL;
 325
 326	for_each_pci_dev(pdev) {
 327
 328		if (!check_device(&pdev->dev))
 329			continue;
 330
 331		iommu_uninit_device(&pdev->dev);
 332	}
 333
 334	/* Free all of our dev_data structures */
 335	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
 336		free_dev_data(dev_data);
 337}
 338
 339int __init amd_iommu_init_devices(void)
 340{
 341	struct pci_dev *pdev = NULL;
 342	int ret = 0;
 343
 344	for_each_pci_dev(pdev) {
 345
 346		if (!check_device(&pdev->dev))
 347			continue;
 348
 349		ret = iommu_init_device(&pdev->dev);
 350		if (ret == -ENOTSUPP)
 351			iommu_ignore_device(&pdev->dev);
 352		else if (ret)
 353			goto out_free;
 354	}
 355
 356	return 0;
 357
 358out_free:
 359
 360	amd_iommu_uninit_devices();
 361
 362	return ret;
 363}
 364#ifdef CONFIG_AMD_IOMMU_STATS
 365
 366/*
 367 * Initialization code for statistics collection
 368 */
 369
 370DECLARE_STATS_COUNTER(compl_wait);
 371DECLARE_STATS_COUNTER(cnt_map_single);
 372DECLARE_STATS_COUNTER(cnt_unmap_single);
 373DECLARE_STATS_COUNTER(cnt_map_sg);
 374DECLARE_STATS_COUNTER(cnt_unmap_sg);
 375DECLARE_STATS_COUNTER(cnt_alloc_coherent);
 376DECLARE_STATS_COUNTER(cnt_free_coherent);
 377DECLARE_STATS_COUNTER(cross_page);
 378DECLARE_STATS_COUNTER(domain_flush_single);
 379DECLARE_STATS_COUNTER(domain_flush_all);
 380DECLARE_STATS_COUNTER(alloced_io_mem);
 381DECLARE_STATS_COUNTER(total_map_requests);
 382DECLARE_STATS_COUNTER(complete_ppr);
 383DECLARE_STATS_COUNTER(invalidate_iotlb);
 384DECLARE_STATS_COUNTER(invalidate_iotlb_all);
 385DECLARE_STATS_COUNTER(pri_requests);
 386
 387
 388static struct dentry *stats_dir;
 389static struct dentry *de_fflush;
 390
 391static void amd_iommu_stats_add(struct __iommu_counter *cnt)
 392{
 393	if (stats_dir == NULL)
 394		return;
 395
 396	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
 397				       &cnt->value);
 398}
 399
 400static void amd_iommu_stats_init(void)
 401{
 402	stats_dir = debugfs_create_dir("amd-iommu", NULL);
 403	if (stats_dir == NULL)
 404		return;
 405
 406	de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
 407					 &amd_iommu_unmap_flush);
 408
 409	amd_iommu_stats_add(&compl_wait);
 410	amd_iommu_stats_add(&cnt_map_single);
 411	amd_iommu_stats_add(&cnt_unmap_single);
 412	amd_iommu_stats_add(&cnt_map_sg);
 413	amd_iommu_stats_add(&cnt_unmap_sg);
 414	amd_iommu_stats_add(&cnt_alloc_coherent);
 415	amd_iommu_stats_add(&cnt_free_coherent);
 416	amd_iommu_stats_add(&cross_page);
 417	amd_iommu_stats_add(&domain_flush_single);
 418	amd_iommu_stats_add(&domain_flush_all);
 419	amd_iommu_stats_add(&alloced_io_mem);
 420	amd_iommu_stats_add(&total_map_requests);
 421	amd_iommu_stats_add(&complete_ppr);
 422	amd_iommu_stats_add(&invalidate_iotlb);
 423	amd_iommu_stats_add(&invalidate_iotlb_all);
 424	amd_iommu_stats_add(&pri_requests);
 425}
 426
 427#endif
 428
 429/****************************************************************************
 430 *
 431 * Interrupt handling functions
 432 *
 433 ****************************************************************************/
 434
 435static void dump_dte_entry(u16 devid)
 436{
 437	int i;
 438
 439	for (i = 0; i < 4; ++i)
 440		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
 441			amd_iommu_dev_table[devid].data[i]);
 442}
 443
 444static void dump_command(unsigned long phys_addr)
 445{
 446	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
 447	int i;
 448
 449	for (i = 0; i < 4; ++i)
 450		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
 451}
 452
 453static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 454{
 455	int type, devid, domid, flags;
 456	volatile u32 *event = __evt;
 457	int count = 0;
 458	u64 address;
 459
 460retry:
 461	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
 462	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 463	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
 464	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 465	address = (u64)(((u64)event[3]) << 32) | event[2];
 466
 467	if (type == 0) {
 468		/* Did we hit the erratum? */
 469		if (++count == LOOP_TIMEOUT) {
 470			pr_err("AMD-Vi: No event written to event log\n");
 471			return;
 472		}
 473		udelay(1);
 474		goto retry;
 475	}
 476
 477	printk(KERN_ERR "AMD-Vi: Event logged [");
 478
 479	switch (type) {
 480	case EVENT_TYPE_ILL_DEV:
 481		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
 482		       "address=0x%016llx flags=0x%04x]\n",
 483		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 484		       address, flags);
 485		dump_dte_entry(devid);
 486		break;
 487	case EVENT_TYPE_IO_FAULT:
 488		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
 489		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 490		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 491		       domid, address, flags);
 492		break;
 493	case EVENT_TYPE_DEV_TAB_ERR:
 494		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 495		       "address=0x%016llx flags=0x%04x]\n",
 496		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 497		       address, flags);
 498		break;
 499	case EVENT_TYPE_PAGE_TAB_ERR:
 500		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
 501		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
 502		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 503		       domid, address, flags);
 504		break;
 505	case EVENT_TYPE_ILL_CMD:
 506		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
 507		dump_command(address);
 508		break;
 509	case EVENT_TYPE_CMD_HARD_ERR:
 510		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
 511		       "flags=0x%04x]\n", address, flags);
 512		break;
 513	case EVENT_TYPE_IOTLB_INV_TO:
 514		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
 515		       "address=0x%016llx]\n",
 516		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 517		       address);
 518		break;
 519	case EVENT_TYPE_INV_DEV_REQ:
 520		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
 521		       "address=0x%016llx flags=0x%04x]\n",
 522		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 523		       address, flags);
 524		break;
 525	default:
 526		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
 527	}
 528
 529	memset(__evt, 0, 4 * sizeof(u32));
 530}
 531
 532static void iommu_poll_events(struct amd_iommu *iommu)
 533{
 534	u32 head, tail;
 535	unsigned long flags;
 536
 537	spin_lock_irqsave(&iommu->lock, flags);
 538
 539	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 540	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 541
 542	while (head != tail) {
 543		iommu_print_event(iommu, iommu->evt_buf + head);
 544		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
 545	}
 546
 547	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 548
 549	spin_unlock_irqrestore(&iommu->lock, flags);
 550}
 551
 552static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 553{
 554	struct amd_iommu_fault fault;
 555
 556	INC_STATS_COUNTER(pri_requests);
 557
 558	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 559		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 560		return;
 561	}
 562
 563	fault.address   = raw[1];
 564	fault.pasid     = PPR_PASID(raw[0]);
 565	fault.device_id = PPR_DEVID(raw[0]);
 566	fault.tag       = PPR_TAG(raw[0]);
 567	fault.flags     = PPR_FLAGS(raw[0]);
 568
 569	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
 570}
 571
 572static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 573{
 574	unsigned long flags;
 575	u32 head, tail;
 576
 577	if (iommu->ppr_log == NULL)
 578		return;
 579
 580	/* enable ppr interrupts again */
 581	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
 582
 583	spin_lock_irqsave(&iommu->lock, flags);
 584
 585	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 586	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 587
 588	while (head != tail) {
 589		volatile u64 *raw;
 590		u64 entry[2];
 591		int i;
 592
 593		raw = (u64 *)(iommu->ppr_log + head);
 594
 595		/*
 596		 * Hardware bug: Interrupt may arrive before the entry is
 597		 * written to memory. If this happens we need to wait for the
 598		 * entry to arrive.
 599		 */
 600		for (i = 0; i < LOOP_TIMEOUT; ++i) {
 601			if (PPR_REQ_TYPE(raw[0]) != 0)
 602				break;
 603			udelay(1);
 604		}
 605
 606		/* Avoid memcpy function-call overhead */
 607		entry[0] = raw[0];
 608		entry[1] = raw[1];
 609
 610		/*
 611		 * To detect the hardware bug we need to clear the entry
 612		 * back to zero.
 613		 */
 614		raw[0] = raw[1] = 0UL;
 615
 616		/* Update head pointer of hardware ring-buffer */
 617		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 618		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 619
 620		/*
 621		 * Release iommu->lock because ppr-handling might need to
  622	 * re-acquire it
 623		 */
 624		spin_unlock_irqrestore(&iommu->lock, flags);
 625
 626		/* Handle PPR entry */
 627		iommu_handle_ppr_entry(iommu, entry);
 628
 629		spin_lock_irqsave(&iommu->lock, flags);
 630
 631		/* Refresh ring-buffer information */
 632		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 633		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 634	}
 635
 636	spin_unlock_irqrestore(&iommu->lock, flags);
 637}
 638
 639irqreturn_t amd_iommu_int_thread(int irq, void *data)
 640{
 641	struct amd_iommu *iommu;
 642
 643	for_each_iommu(iommu) {
 644		iommu_poll_events(iommu);
 645		iommu_poll_ppr_log(iommu);
 646	}
 647
 648	return IRQ_HANDLED;
 649}
 650
 651irqreturn_t amd_iommu_int_handler(int irq, void *data)
 652{
 653	return IRQ_WAKE_THREAD;
 654}
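The hard handler above does nothing but ask the core to wake the threaded handler, so the actual log draining in amd_iommu_int_thread() runs in sleepable process context. The pairing is established at init time via request_threaded_irq() in amd_iommu_init.c (outside this file); a hedged sketch of that registration, with the flags and dev_id cookie shown only for illustration:

/* Sketch: pairing the two handlers above; the driver's real
 * registration lives in amd_iommu_init.c and may differ in detail. */
ret = request_threaded_irq(iommu->dev->irq,
			   amd_iommu_int_handler,  /* hard irq: returns IRQ_WAKE_THREAD */
			   amd_iommu_int_thread,   /* drains event and PPR logs */
			   0, "AMD-Vi", iommu->dev);
if (ret)
	pr_err("AMD-Vi: Failed to register interrupt handler\n");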
 655
 656/****************************************************************************
 657 *
 658 * IOMMU command queuing functions
 659 *
 660 ****************************************************************************/
 661
 662static int wait_on_sem(volatile u64 *sem)
 663{
 664	int i = 0;
 665
 666	while (*sem == 0 && i < LOOP_TIMEOUT) {
 667		udelay(1);
 668		i += 1;
 669	}
 670
 671	if (i == LOOP_TIMEOUT) {
 672		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
 673		return -EIO;
 674	}
 675
 676	return 0;
 677}
 678
 679static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 680			       struct iommu_cmd *cmd,
 681			       u32 tail)
 682{
 683	u8 *target;
 684
 685	target = iommu->cmd_buf + tail;
 686	tail   = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
 687
 688	/* Copy command to buffer */
 689	memcpy(target, cmd, sizeof(*cmd));
 690
 691	/* Tell the IOMMU about it */
 692	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 693}
 694
 695static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 696{
 697	WARN_ON(address & 0x7ULL);
 698
 699	memset(cmd, 0, sizeof(*cmd));
 700	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
 701	cmd->data[1] = upper_32_bits(__pa(address));
 702	cmd->data[2] = 1;
 703	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 704}
 705
 706static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 707{
 708	memset(cmd, 0, sizeof(*cmd));
 709	cmd->data[0] = devid;
 710	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 711}
 712
 713static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 714				  size_t size, u16 domid, int pde)
 715{
 716	u64 pages;
 717	int s;
 718
 719	pages = iommu_num_pages(address, size, PAGE_SIZE);
 720	s     = 0;
 721
 722	if (pages > 1) {
 723		/*
 724		 * If we have to flush more than one page, flush all
 725		 * TLB entries for this domain
 726		 */
 727		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 728		s = 1;
 729	}
 730
 731	address &= PAGE_MASK;
 732
 733	memset(cmd, 0, sizeof(*cmd));
 734	cmd->data[1] |= domid;
 735	cmd->data[2]  = lower_32_bits(address);
 736	cmd->data[3]  = upper_32_bits(address);
 737	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 738	if (s) /* size bit - we flush more than one 4kb page */
 739		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
  740	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
 741		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 742}
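Every command is four 32-bit words, and CMD_SET_TYPE() places the opcode in bits 31:28 of data[1]. A self-contained sketch assembling an INVALIDATE_IOMMU_PAGES command the same way build_inv_iommu_pages() does; the opcode and the S/PDE bit values mirror the driver's macros and should be treated as illustrative constants here:

/* Sketch: command-word packing as in build_inv_iommu_pages(). */
#include <stdint.h>
#include <stdio.h>

struct demo_cmd { uint32_t data[4]; };

#define DEMO_CMD_INV_IOMMU_PAGES	0x03	/* opcode, bits 31:28 of data[1] */
#define DEMO_PAGES_SIZE_MASK		0x01	/* S bit: range, not a single page */
#define DEMO_PAGES_PDE_MASK		0x02	/* PDE bit: flush PDEs as well     */

static void demo_build_inv_pages(struct demo_cmd *cmd, uint64_t address,
				 uint16_t domid, int s, int pde)
{
	cmd->data[0] = 0;
	cmd->data[1] = domid | (DEMO_CMD_INV_IOMMU_PAGES << 28);
	cmd->data[2] = (uint32_t)address
		     | (s   ? DEMO_PAGES_SIZE_MASK : 0)
		     | (pde ? DEMO_PAGES_PDE_MASK  : 0);
	cmd->data[3] = (uint32_t)(address >> 32);
}

int main(void)
{
	struct demo_cmd cmd;
	int i;

	/* flush the whole address space of domain 5, including PDEs */
	demo_build_inv_pages(&cmd, 0x7ffffffffffff000ULL, 5, 1, 1);
	for (i = 0; i < 4; i++)
		printf("CMD[%d]: %08x\n", i, cmd.data[i]);
	return 0;
}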
 743
 744static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 745				  u64 address, size_t size)
 746{
 747	u64 pages;
 748	int s;
 749
 750	pages = iommu_num_pages(address, size, PAGE_SIZE);
 751	s     = 0;
 752
 753	if (pages > 1) {
 754		/*
 755		 * If we have to flush more than one page, flush all
 756		 * TLB entries for this domain
 757		 */
 758		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 759		s = 1;
 760	}
 761
 762	address &= PAGE_MASK;
 763
 764	memset(cmd, 0, sizeof(*cmd));
 765	cmd->data[0]  = devid;
 766	cmd->data[0] |= (qdep & 0xff) << 24;
 767	cmd->data[1]  = devid;
 768	cmd->data[2]  = lower_32_bits(address);
 769	cmd->data[3]  = upper_32_bits(address);
 770	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 771	if (s)
 772		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 773}
 774
 775static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
 776				  u64 address, bool size)
 777{
 778	memset(cmd, 0, sizeof(*cmd));
 779
 780	address &= ~(0xfffULL);
 781
 782	cmd->data[0]  = pasid & PASID_MASK;
 783	cmd->data[1]  = domid;
 784	cmd->data[2]  = lower_32_bits(address);
 785	cmd->data[3]  = upper_32_bits(address);
 786	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 787	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 788	if (size)
 789		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 790	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
 791}
 792
 793static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
 794				  int qdep, u64 address, bool size)
 795{
 796	memset(cmd, 0, sizeof(*cmd));
 797
 798	address &= ~(0xfffULL);
 799
 800	cmd->data[0]  = devid;
 801	cmd->data[0] |= (pasid & 0xff) << 16;
 802	cmd->data[0] |= (qdep  & 0xff) << 24;
 803	cmd->data[1]  = devid;
 804	cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
 805	cmd->data[2]  = lower_32_bits(address);
 806	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
 807	cmd->data[3]  = upper_32_bits(address);
 808	if (size)
 809		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 810	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 811}
 812
 813static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
 814			       int status, int tag, bool gn)
 815{
 816	memset(cmd, 0, sizeof(*cmd));
 817
 818	cmd->data[0]  = devid;
 819	if (gn) {
 820		cmd->data[1]  = pasid & PASID_MASK;
 821		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
 822	}
 823	cmd->data[3]  = tag & 0x1ff;
 824	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
 825
 826	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
 827}
 828
 829static void build_inv_all(struct iommu_cmd *cmd)
 830{
 831	memset(cmd, 0, sizeof(*cmd));
 832	CMD_SET_TYPE(cmd, CMD_INV_ALL);
 833}
 834
 835/*
  836 * Writes the command to the IOMMU's command buffer and informs the
 837 * hardware about the new command.
 838 */
 839static int iommu_queue_command_sync(struct amd_iommu *iommu,
 840				    struct iommu_cmd *cmd,
 841				    bool sync)
 842{
 843	u32 left, tail, head, next_tail;
 844	unsigned long flags;
 845
 846	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 847
 848again:
 849	spin_lock_irqsave(&iommu->lock, flags);
 850
 851	head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 852	tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 853	next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
 854	left      = (head - next_tail) % iommu->cmd_buf_size;
 855
 856	if (left <= 2) {
 857		struct iommu_cmd sync_cmd;
 858		volatile u64 sem = 0;
 859		int ret;
 860
 861		build_completion_wait(&sync_cmd, (u64)&sem);
 862		copy_cmd_to_buffer(iommu, &sync_cmd, tail);
 863
 864		spin_unlock_irqrestore(&iommu->lock, flags);
 865
 866		if ((ret = wait_on_sem(&sem)) != 0)
 867			return ret;
 868
 869		goto again;
 870	}
 871
 872	copy_cmd_to_buffer(iommu, cmd, tail);
 873
 874	/* We need to sync now to make sure all commands are processed */
 875	iommu->need_sync = sync;
 876
 877	spin_unlock_irqrestore(&iommu->lock, flags);
 878
 879	return 0;
 880}
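The free-space computation left = (head - next_tail) % cmd_buf_size relies on unsigned wrap-around: because the buffer size is a power of two, the modulo yields the distance from the next tail position to the consumer's head no matter which pointer has wrapped, and the left <= 2 check keeps the tail from overrunning the head. A small model of that arithmetic (buffer and entry sizes are illustrative):

/* Sketch: wrap-around free-space math of the command ring. */
#include <assert.h>
#include <stdint.h>

#define RING_SIZE	512u	/* illustrative; must be a power of two */
#define ENTRY_SIZE	16u	/* sizeof(struct iommu_cmd) */

static uint32_t demo_ring_left(uint32_t head, uint32_t tail)
{
	uint32_t next_tail = (tail + ENTRY_SIZE) % RING_SIZE;

	/* unsigned subtraction wraps; % of a power of two fixes it up */
	return (head - next_tail) % RING_SIZE;
}

int main(void)
{
	/* empty ring: all but one slot counts as free */
	assert(demo_ring_left(0, 0) == RING_SIZE - ENTRY_SIZE);
	/* producer about to wrap to 0 while consumer sits at 16 */
	assert(demo_ring_left(16, RING_SIZE - ENTRY_SIZE) == 16);
	return 0;
}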
 881
 882static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 883{
 884	return iommu_queue_command_sync(iommu, cmd, true);
 885}
 886
 887/*
 888 * This function queues a completion wait command into the command
 889 * buffer of an IOMMU
 890 */
 891static int iommu_completion_wait(struct amd_iommu *iommu)
 892{
 893	struct iommu_cmd cmd;
 894	volatile u64 sem = 0;
 895	int ret;
 896
 897	if (!iommu->need_sync)
 898		return 0;
 899
 900	build_completion_wait(&cmd, (u64)&sem);
 901
 902	ret = iommu_queue_command_sync(iommu, &cmd, false);
 903	if (ret)
 904		return ret;
 905
 906	return wait_on_sem(&sem);
 907}
 908
 909static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
 910{
 911	struct iommu_cmd cmd;
 912
 913	build_inv_dte(&cmd, devid);
 914
 915	return iommu_queue_command(iommu, &cmd);
 916}
 917
 918static void iommu_flush_dte_all(struct amd_iommu *iommu)
 919{
 920	u32 devid;
 921
 922	for (devid = 0; devid <= 0xffff; ++devid)
 923		iommu_flush_dte(iommu, devid);
 924
 925	iommu_completion_wait(iommu);
 926}
 927
 928/*
 929 * This function uses heavy locking and may disable irqs for some time. But
 930 * this is no issue because it is only called during resume.
 931 */
 932static void iommu_flush_tlb_all(struct amd_iommu *iommu)
 933{
 934	u32 dom_id;
 935
 936	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
 937		struct iommu_cmd cmd;
 938		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 939				      dom_id, 1);
 940		iommu_queue_command(iommu, &cmd);
 941	}
 942
 943	iommu_completion_wait(iommu);
 944}
 945
 946static void iommu_flush_all(struct amd_iommu *iommu)
 947{
 948	struct iommu_cmd cmd;
 949
 950	build_inv_all(&cmd);
 951
 952	iommu_queue_command(iommu, &cmd);
 953	iommu_completion_wait(iommu);
 954}
 955
 956void iommu_flush_all_caches(struct amd_iommu *iommu)
 957{
 958	if (iommu_feature(iommu, FEATURE_IA)) {
 959		iommu_flush_all(iommu);
 960	} else {
 961		iommu_flush_dte_all(iommu);
 962		iommu_flush_tlb_all(iommu);
 963	}
 964}
 965
 966/*
 967 * Command send function for flushing on-device TLB
 968 */
 969static int device_flush_iotlb(struct iommu_dev_data *dev_data,
 970			      u64 address, size_t size)
 971{
 972	struct amd_iommu *iommu;
 973	struct iommu_cmd cmd;
 974	int qdep;
 975
 976	qdep     = dev_data->ats.qdep;
 977	iommu    = amd_iommu_rlookup_table[dev_data->devid];
 978
 979	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
 980
 981	return iommu_queue_command(iommu, &cmd);
 982}
 983
 984/*
 985 * Command send function for invalidating a device table entry
 986 */
 987static int device_flush_dte(struct iommu_dev_data *dev_data)
 988{
 989	struct amd_iommu *iommu;
 990	int ret;
 991
 992	iommu = amd_iommu_rlookup_table[dev_data->devid];
 993
 994	ret = iommu_flush_dte(iommu, dev_data->devid);
 995	if (ret)
 996		return ret;
 997
 998	if (dev_data->ats.enabled)
 999		ret = device_flush_iotlb(dev_data, 0, ~0UL);
1000
1001	return ret;
1002}
1003
1004/*
1005 * TLB invalidation function which is called from the mapping functions.
1006 * It invalidates a single PTE if the range to flush is within a single
1007 * page. Otherwise it flushes the whole TLB of the IOMMU.
1008 */
1009static void __domain_flush_pages(struct protection_domain *domain,
1010				 u64 address, size_t size, int pde)
1011{
1012	struct iommu_dev_data *dev_data;
1013	struct iommu_cmd cmd;
1014	int ret = 0, i;
1015
1016	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
1017
1018	for (i = 0; i < amd_iommus_present; ++i) {
1019		if (!domain->dev_iommu[i])
1020			continue;
1021
1022		/*
1023		 * Devices of this domain are behind this IOMMU
1024		 * We need a TLB flush
1025		 */
1026		ret |= iommu_queue_command(amd_iommus[i], &cmd);
1027	}
1028
1029	list_for_each_entry(dev_data, &domain->dev_list, list) {
1030
1031		if (!dev_data->ats.enabled)
1032			continue;
1033
1034		ret |= device_flush_iotlb(dev_data, address, size);
1035	}
1036
1037	WARN_ON(ret);
1038}
1039
1040static void domain_flush_pages(struct protection_domain *domain,
1041			       u64 address, size_t size)
1042{
1043	__domain_flush_pages(domain, address, size, 0);
1044}
1045
1046/* Flush the whole IO/TLB for a given protection domain */
1047static void domain_flush_tlb(struct protection_domain *domain)
1048{
1049	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
1050}
1051
1052/* Flush the whole IO/TLB for a given protection domain - including PDE */
1053static void domain_flush_tlb_pde(struct protection_domain *domain)
1054{
1055	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
1056}
1057
1058static void domain_flush_complete(struct protection_domain *domain)
1059{
1060	int i;
1061
1062	for (i = 0; i < amd_iommus_present; ++i) {
1063		if (!domain->dev_iommu[i])
1064			continue;
1065
1066		/*
1067		 * Devices of this domain are behind this IOMMU
1068		 * We need to wait for completion of all commands.
1069		 */
1070		iommu_completion_wait(amd_iommus[i]);
1071	}
1072}
1073
1074
1075/*
1076 * This function flushes the DTEs for all devices in domain
1077 */
1078static void domain_flush_devices(struct protection_domain *domain)
1079{
1080	struct iommu_dev_data *dev_data;
1081
1082	list_for_each_entry(dev_data, &domain->dev_list, list)
1083		device_flush_dte(dev_data);
1084}
1085
1086/****************************************************************************
1087 *
 1088 * The functions below are used to create the page table mappings for
1089 * unity mapped regions.
1090 *
1091 ****************************************************************************/
1092
1093/*
1094 * This function is used to add another level to an IO page table. Adding
1095 * another level increases the size of the address space by 9 bits to a size up
1096 * to 64 bits.
1097 */
1098static bool increase_address_space(struct protection_domain *domain,
1099				   gfp_t gfp)
1100{
1101	u64 *pte;
1102
1103	if (domain->mode == PAGE_MODE_6_LEVEL)
1104		/* address space already 64 bit large */
1105		return false;
1106
1107	pte = (void *)get_zeroed_page(gfp);
1108	if (!pte)
1109		return false;
1110
1111	*pte             = PM_LEVEL_PDE(domain->mode,
1112					virt_to_phys(domain->pt_root));
1113	domain->pt_root  = pte;
1114	domain->mode    += 1;
1115	domain->updated  = true;
1116
1117	return true;
1118}
1119
1120static u64 *alloc_pte(struct protection_domain *domain,
1121		      unsigned long address,
1122		      unsigned long page_size,
1123		      u64 **pte_page,
1124		      gfp_t gfp)
1125{
1126	int level, end_lvl;
1127	u64 *pte, *page;
1128
1129	BUG_ON(!is_power_of_2(page_size));
1130
1131	while (address > PM_LEVEL_SIZE(domain->mode))
1132		increase_address_space(domain, gfp);
1133
1134	level   = domain->mode - 1;
1135	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1136	address = PAGE_SIZE_ALIGN(address, page_size);
1137	end_lvl = PAGE_SIZE_LEVEL(page_size);
1138
1139	while (level > end_lvl) {
1140		if (!IOMMU_PTE_PRESENT(*pte)) {
1141			page = (u64 *)get_zeroed_page(gfp);
1142			if (!page)
1143				return NULL;
1144			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
1145		}
1146
1147		/* No level skipping support yet */
1148		if (PM_PTE_LEVEL(*pte) != level)
1149			return NULL;
1150
1151		level -= 1;
1152
1153		pte = IOMMU_PTE_PAGE(*pte);
1154
1155		if (pte_page && level == end_lvl)
1156			*pte_page = pte;
1157
1158		pte = &pte[PM_LEVEL_INDEX(level, address)];
1159	}
1160
1161	return pte;
1162}
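PM_LEVEL_INDEX() behind the walk above resolves 9 address bits per level on top of the 12-bit page offset, so a mode-N table covers 12 + 9*N bits of address space. A standalone sketch of the index arithmetic:

/* Sketch: per-level page-table index, 9 bits per level above the
 * 12-bit page offset (mirrors PM_LEVEL_INDEX). */
#include <assert.h>
#include <stdint.h>

static inline unsigned int demo_level_index(uint64_t addr, int level)
{
	return (addr >> (12 + 9 * level)) & 0x1ff;
}

int main(void)
{
	uint64_t addr = 0x0000123456789000ULL;

	assert(demo_level_index(addr, 0) == ((addr >> 12) & 0x1ff));
	/* three levels cover 12 + 3*9 = 39 bits of address space */
	assert(demo_level_index(1ULL << 38, 2) == (1u << (38 - 30)));
	return 0;
}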
1163
1164/*
1165 * This function checks if there is a PTE for a given dma address. If
1166 * there is one, it returns the pointer to it.
1167 */
1168static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
1169{
1170	int level;
1171	u64 *pte;
1172
1173	if (address > PM_LEVEL_SIZE(domain->mode))
1174		return NULL;
1175
1176	level   =  domain->mode - 1;
1177	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1178
1179	while (level > 0) {
1180
1181		/* Not Present */
1182		if (!IOMMU_PTE_PRESENT(*pte))
1183			return NULL;
1184
1185		/* Large PTE */
1186		if (PM_PTE_LEVEL(*pte) == 0x07) {
1187			unsigned long pte_mask, __pte;
1188
1189			/*
1190			 * If we have a series of large PTEs, make
1191			 * sure to return a pointer to the first one.
1192			 */
1193			pte_mask = PTE_PAGE_SIZE(*pte);
1194			pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
1195			__pte    = ((unsigned long)pte) & pte_mask;
1196
1197			return (u64 *)__pte;
1198		}
1199
1200		/* No level skipping support yet */
1201		if (PM_PTE_LEVEL(*pte) != level)
1202			return NULL;
1203
1204		level -= 1;
1205
1206		/* Walk to the next level */
1207		pte = IOMMU_PTE_PAGE(*pte);
1208		pte = &pte[PM_LEVEL_INDEX(level, address)];
1209	}
1210
1211	return pte;
1212}
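The large-PTE branch works because a large page is stored as PAGE_SIZE_PTE_COUNT() replicated 8-byte entries, so masking the PTE pointer with ~((count << 3) - 1) snaps it back to the first entry of the run. A sketch of that mask arithmetic (uses the GCC/Clang ctz builtin; the count derivation mirrors PAGE_SIZE_PTE_COUNT):

/* Sketch: find the first PTE of a replicated large-page run
 * (mirrors the pte_mask computation in fetch_pte above). */
#include <assert.h>
#include <stdint.h>

static uint64_t demo_first_pte(uint64_t pte_ptr, unsigned long pgsize)
{
	/* number of replicated 8-byte entries for this page size */
	unsigned long count = 1UL << ((__builtin_ctzl(pgsize) - 12) % 9);
	uint64_t mask = ~((uint64_t)(count << 3) - 1);

	return pte_ptr & mask;
}

int main(void)
{
	/* a 32KiB page is 8 replicated PTEs = one 64-byte aligned run */
	assert(demo_first_pte(0x1038, 0x8000) == 0x1000);
	/* a 4KiB page: a single PTE, the pointer is unchanged */
	assert(demo_first_pte(0x1038, 0x1000) == 0x1038);
	return 0;
}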
1213
1214/*
 1215 * Generic mapping function. It maps a physical address into a DMA
1216 * address space. It allocates the page table pages if necessary.
1217 * In the future it can be extended to a generic mapping function
1218 * supporting all features of AMD IOMMU page tables like level skipping
1219 * and full 64 bit address spaces.
1220 */
1221static int iommu_map_page(struct protection_domain *dom,
1222			  unsigned long bus_addr,
1223			  unsigned long phys_addr,
1224			  int prot,
1225			  unsigned long page_size)
1226{
1227	u64 __pte, *pte;
1228	int i, count;
1229
1230	if (!(prot & IOMMU_PROT_MASK))
1231		return -EINVAL;
1232
1233	bus_addr  = PAGE_ALIGN(bus_addr);
1234	phys_addr = PAGE_ALIGN(phys_addr);
1235	count     = PAGE_SIZE_PTE_COUNT(page_size);
 1236	pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
	if (!pte)		/* alloc_pte() can fail under memory pressure */
		return -ENOMEM;
1237
1238	for (i = 0; i < count; ++i)
1239		if (IOMMU_PTE_PRESENT(pte[i]))
1240			return -EBUSY;
1241
1242	if (page_size > PAGE_SIZE) {
1243		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
1244		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
1245	} else
1246		__pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
1247
1248	if (prot & IOMMU_PROT_IR)
1249		__pte |= IOMMU_PTE_IR;
1250	if (prot & IOMMU_PROT_IW)
1251		__pte |= IOMMU_PTE_IW;
1252
1253	for (i = 0; i < count; ++i)
1254		pte[i] = __pte;
1255
1256	update_domain(dom);
1257
1258	return 0;
1259}
1260
1261static unsigned long iommu_unmap_page(struct protection_domain *dom,
1262				      unsigned long bus_addr,
1263				      unsigned long page_size)
1264{
1265	unsigned long long unmap_size, unmapped;
1266	u64 *pte;
1267
1268	BUG_ON(!is_power_of_2(page_size));
1269
1270	unmapped = 0;
1271
1272	while (unmapped < page_size) {
1273
1274		pte = fetch_pte(dom, bus_addr);
1275
1276		if (!pte) {
1277			/*
1278			 * No PTE for this address
1279			 * move forward in 4kb steps
1280			 */
1281			unmap_size = PAGE_SIZE;
1282		} else if (PM_PTE_LEVEL(*pte) == 0) {
1283			/* 4kb PTE found for this address */
1284			unmap_size = PAGE_SIZE;
1285			*pte       = 0ULL;
1286		} else {
1287			int count, i;
1288
1289			/* Large PTE found which maps this address */
1290			unmap_size = PTE_PAGE_SIZE(*pte);
1291			count      = PAGE_SIZE_PTE_COUNT(unmap_size);
1292			for (i = 0; i < count; i++)
1293				pte[i] = 0ULL;
1294		}
1295
1296		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
1297		unmapped += unmap_size;
1298	}
1299
1300	BUG_ON(!is_power_of_2(unmapped));
1301
1302	return unmapped;
1303}
1304
1305/*
1306 * This function checks if a specific unity mapping entry is needed for
1307 * this specific IOMMU.
1308 */
1309static int iommu_for_unity_map(struct amd_iommu *iommu,
1310			       struct unity_map_entry *entry)
1311{
1312	u16 bdf, i;
1313
1314	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
1315		bdf = amd_iommu_alias_table[i];
1316		if (amd_iommu_rlookup_table[bdf] == iommu)
1317			return 1;
1318	}
1319
1320	return 0;
1321}
1322
1323/*
1324 * This function actually applies the mapping to the page table of the
1325 * dma_ops domain.
1326 */
1327static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
1328			     struct unity_map_entry *e)
1329{
1330	u64 addr;
1331	int ret;
1332
1333	for (addr = e->address_start; addr < e->address_end;
1334	     addr += PAGE_SIZE) {
1335		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
1336				     PAGE_SIZE);
1337		if (ret)
1338			return ret;
1339		/*
 1340		 * If the unity mapping is in the aperture range, mark the
 1341		 * page as allocated in the aperture
1342		 */
1343		if (addr < dma_dom->aperture_size)
1344			__set_bit(addr >> PAGE_SHIFT,
1345				  dma_dom->aperture[0]->bitmap);
1346	}
1347
1348	return 0;
1349}
1350
1351/*
1352 * Init the unity mappings for a specific IOMMU in the system
1353 *
1354 * Basically iterates over all unity mapping entries and applies them to
 1355 * the default DMA domain of that IOMMU if necessary.
1356 */
1357static int iommu_init_unity_mappings(struct amd_iommu *iommu)
1358{
1359	struct unity_map_entry *entry;
1360	int ret;
1361
1362	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
1363		if (!iommu_for_unity_map(iommu, entry))
1364			continue;
1365		ret = dma_ops_unity_map(iommu->default_dom, entry);
1366		if (ret)
1367			return ret;
1368	}
1369
1370	return 0;
1371}
1372
1373/*
1374 * Inits the unity mappings required for a specific device
1375 */
1376static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
1377					  u16 devid)
1378{
1379	struct unity_map_entry *e;
1380	int ret;
1381
1382	list_for_each_entry(e, &amd_iommu_unity_map, list) {
1383		if (!(devid >= e->devid_start && devid <= e->devid_end))
1384			continue;
1385		ret = dma_ops_unity_map(dma_dom, e);
1386		if (ret)
1387			return ret;
1388	}
1389
1390	return 0;
1391}
1392
1393/****************************************************************************
1394 *
1395 * The next functions belong to the address allocator for the dma_ops
1396 * interface functions. They work like the allocators in the other IOMMU
 1397 * drivers. It's basically a bitmap which marks the allocated pages in
1398 * the aperture. Maybe it could be enhanced in the future to a more
1399 * efficient allocator.
1400 *
1401 ****************************************************************************/
1402
1403/*
1404 * The address allocator core functions.
1405 *
1406 * called with domain->lock held
1407 */
1408
1409/*
1410 * Used to reserve address ranges in the aperture (e.g. for exclusion
 1411 * ranges).
1412 */
1413static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
1414				      unsigned long start_page,
1415				      unsigned int pages)
1416{
1417	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
1418
1419	if (start_page + pages > last_page)
1420		pages = last_page - start_page;
1421
1422	for (i = start_page; i < start_page + pages; ++i) {
1423		int index = i / APERTURE_RANGE_PAGES;
1424		int page  = i % APERTURE_RANGE_PAGES;
1425		__set_bit(page, dom->aperture[index]->bitmap);
1426	}
1427}
1428
1429/*
1430 * This function is used to add a new aperture range to an existing
1431 * aperture in case of dma_ops domain allocation or address allocation
1432 * failure.
1433 */
1434static int alloc_new_range(struct dma_ops_domain *dma_dom,
1435			   bool populate, gfp_t gfp)
1436{
1437	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
1438	struct amd_iommu *iommu;
1439	unsigned long i, old_size;
1440
1441#ifdef CONFIG_IOMMU_STRESS
1442	populate = false;
1443#endif
1444
1445	if (index >= APERTURE_MAX_RANGES)
1446		return -ENOMEM;
1447
1448	dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
1449	if (!dma_dom->aperture[index])
1450		return -ENOMEM;
1451
1452	dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
1453	if (!dma_dom->aperture[index]->bitmap)
1454		goto out_free;
1455
1456	dma_dom->aperture[index]->offset = dma_dom->aperture_size;
1457
1458	if (populate) {
1459		unsigned long address = dma_dom->aperture_size;
1460		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
1461		u64 *pte, *pte_page;
1462
1463		for (i = 0; i < num_ptes; ++i) {
1464			pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
1465					&pte_page, gfp);
1466			if (!pte)
1467				goto out_free;
1468
1469			dma_dom->aperture[index]->pte_pages[i] = pte_page;
1470
1471			address += APERTURE_RANGE_SIZE / 64;
1472		}
1473	}
1474
1475	old_size                = dma_dom->aperture_size;
1476	dma_dom->aperture_size += APERTURE_RANGE_SIZE;
1477
1478	/* Reserve address range used for MSI messages */
1479	if (old_size < MSI_ADDR_BASE_LO &&
1480	    dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
1481		unsigned long spage;
1482		int pages;
1483
1484		pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
1485		spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
1486
1487		dma_ops_reserve_addresses(dma_dom, spage, pages);
1488	}
1489
1490	/* Initialize the exclusion range if necessary */
1491	for_each_iommu(iommu) {
1492		if (iommu->exclusion_start &&
1493		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
1494		    && iommu->exclusion_start < dma_dom->aperture_size) {
1495			unsigned long startpage;
1496			int pages = iommu_num_pages(iommu->exclusion_start,
1497						    iommu->exclusion_length,
1498						    PAGE_SIZE);
1499			startpage = iommu->exclusion_start >> PAGE_SHIFT;
1500			dma_ops_reserve_addresses(dma_dom, startpage, pages);
1501		}
1502	}
1503
1504	/*
1505	 * Check for areas already mapped as present in the new aperture
1506	 * range and mark those pages as reserved in the allocator. Such
1507	 * mappings may already exist as a result of requested unity
1508	 * mappings for devices.
1509	 */
1510	for (i = dma_dom->aperture[index]->offset;
1511	     i < dma_dom->aperture_size;
1512	     i += PAGE_SIZE) {
1513		u64 *pte = fetch_pte(&dma_dom->domain, i);
1514		if (!pte || !IOMMU_PTE_PRESENT(*pte))
1515			continue;
1516
1517		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
1518	}
1519
1520	update_domain(&dma_dom->domain);
1521
1522	return 0;
1523
1524out_free:
1525	update_domain(&dma_dom->domain);
1526
1527	free_page((unsigned long)dma_dom->aperture[index]->bitmap);
1528
1529	kfree(dma_dom->aperture[index]);
1530	dma_dom->aperture[index] = NULL;
1531
1532	return -ENOMEM;
1533}
1534
1535static unsigned long dma_ops_area_alloc(struct device *dev,
1536					struct dma_ops_domain *dom,
1537					unsigned int pages,
1538					unsigned long align_mask,
1539					u64 dma_mask,
1540					unsigned long start)
1541{
1542	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
1543	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
1544	int i = start >> APERTURE_RANGE_SHIFT;
1545	unsigned long boundary_size;
1546	unsigned long address = -1;
1547	unsigned long limit;
1548
1549	next_bit >>= PAGE_SHIFT;
1550
1551	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
1552			PAGE_SIZE) >> PAGE_SHIFT;
1553
 1554	for (; i < max_index; ++i) {
1555		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
1556
1557		if (dom->aperture[i]->offset >= dma_mask)
1558			break;
1559
1560		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
1561					       dma_mask >> PAGE_SHIFT);
1562
1563		address = iommu_area_alloc(dom->aperture[i]->bitmap,
1564					   limit, next_bit, pages, 0,
1565					    boundary_size, align_mask);
1566		if (address != -1) {
1567			address = dom->aperture[i]->offset +
1568				  (address << PAGE_SHIFT);
1569			dom->next_address = address + (pages << PAGE_SHIFT);
1570			break;
1571		}
1572
1573		next_bit = 0;
1574	}
1575
1576	return address;
1577}
1578
1579static unsigned long dma_ops_alloc_addresses(struct device *dev,
1580					     struct dma_ops_domain *dom,
1581					     unsigned int pages,
1582					     unsigned long align_mask,
1583					     u64 dma_mask)
1584{
1585	unsigned long address;
1586
1587#ifdef CONFIG_IOMMU_STRESS
1588	dom->next_address = 0;
1589	dom->need_flush = true;
1590#endif
1591
1592	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
1593				     dma_mask, dom->next_address);
1594
1595	if (address == -1) {
1596		dom->next_address = 0;
1597		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
1598					     dma_mask, 0);
1599		dom->need_flush = true;
1600	}
1601
1602	if (unlikely(address == -1))
1603		address = DMA_ERROR_CODE;
1604
1605	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
1606
1607	return address;
1608}
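Taken together this is a next-fit allocator: the search resumes at next_address, and only after a failed pass does it wrap to 0 and set need_flush, because wrapped allocations hand out addresses whose old IOTLB entries may still be live. A compact single-range model of that policy (bitmap helpers simplified; no boundary or alignment handling):

/* Sketch: next-fit bitmap allocator with one wrap retry, modelling
 * dma_ops_alloc_addresses() (single range, page granularity). */
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16

static bool bitmap[NPAGES];
static unsigned next;
static bool need_flush;

static int demo_alloc(unsigned pages)
{
	int pass;

	for (pass = 0; pass < 2; pass++) {
		unsigned i;

		for (i = next; i + pages <= NPAGES; i++) {
			unsigned j;

			for (j = 0; j < pages && !bitmap[i + j]; j++)
				;
			if (j == pages) {
				for (j = 0; j < pages; j++)
					bitmap[i + j] = true;
				next = i + pages;
				return (int)i;
			}
		}
		next = 0;		/* wrap once, like the driver */
		need_flush = true;	/* reused addresses: flush IOTLB */
	}
	return -1;			/* DMA_ERROR_CODE equivalent */
}

int main(void)
{
	/* third allocation fails until something is freed: prints 0 8 -1 */
	printf("%d %d %d\n", demo_alloc(8), demo_alloc(8), demo_alloc(4));
	return 0;
}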
1609
1610/*
1611 * The address free function.
1612 *
1613 * called with domain->lock held
1614 */
1615static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1616				   unsigned long address,
1617				   unsigned int pages)
1618{
1619	unsigned i = address >> APERTURE_RANGE_SHIFT;
1620	struct aperture_range *range = dom->aperture[i];
1621
1622	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
1623
1624#ifdef CONFIG_IOMMU_STRESS
1625	if (i < 4)
1626		return;
1627#endif
1628
1629	if (address >= dom->next_address)
1630		dom->need_flush = true;
1631
1632	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
1633
1634	bitmap_clear(range->bitmap, address, pages);
1635
1636}
1637
1638/****************************************************************************
1639 *
1640 * The next functions belong to the domain allocation. A domain is
1641 * allocated for every IOMMU as the default domain. If device isolation
 1642 * is enabled, every device gets its own domain. The most important thing
1643 * about domains is the page table mapping the DMA address space they
1644 * contain.
1645 *
1646 ****************************************************************************/
1647
1648/*
1649 * This function adds a protection domain to the global protection domain list
1650 */
1651static void add_domain_to_list(struct protection_domain *domain)
1652{
1653	unsigned long flags;
1654
1655	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1656	list_add(&domain->list, &amd_iommu_pd_list);
1657	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1658}
1659
1660/*
 1661 * This function removes a protection domain from the global
1662 * protection domain list
1663 */
1664static void del_domain_from_list(struct protection_domain *domain)
1665{
1666	unsigned long flags;
1667
1668	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1669	list_del(&domain->list);
1670	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1671}
1672
1673static u16 domain_id_alloc(void)
1674{
1675	unsigned long flags;
1676	int id;
1677
1678	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1679	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1680	BUG_ON(id == 0);
1681	if (id > 0 && id < MAX_DOMAIN_ID)
1682		__set_bit(id, amd_iommu_pd_alloc_bitmap);
1683	else
1684		id = 0;
1685	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1686
1687	return id;
1688}
1689
1690static void domain_id_free(int id)
1691{
1692	unsigned long flags;
1693
1694	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1695	if (id > 0 && id < MAX_DOMAIN_ID)
1696		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
1697	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1698}
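Domain IDs come from a global bitmap in which ID 0 is reserved (0 means "no domain" in a DTE), which is why the allocator BUGs on finding bit 0 clear and also uses 0 as its failure value. A userspace sketch of the same find-first-zero-and-set scheme:

/* Sketch: bitmap ID allocator reserving ID 0, modelled on
 * domain_id_alloc()/domain_id_free() above. */
#include <assert.h>

#define MAX_ID 64

static unsigned long alloc_bitmap = 1UL;	/* bit 0 pre-set: ID 0 reserved */

static int demo_id_alloc(void)
{
	int id;

	for (id = 1; id < MAX_ID; id++) {
		if (!(alloc_bitmap & (1UL << id))) {
			alloc_bitmap |= 1UL << id;
			return id;
		}
	}
	return 0;				/* 0 doubles as "allocation failed" */
}

static void demo_id_free(int id)
{
	if (id > 0 && id < MAX_ID)
		alloc_bitmap &= ~(1UL << id);
}

int main(void)
{
	int a = demo_id_alloc(), b = demo_id_alloc();

	assert(a == 1 && b == 2);
	demo_id_free(a);
	assert(demo_id_alloc() == 1);		/* freed IDs are reused */
	return 0;
}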
1699
1700static void free_pagetable(struct protection_domain *domain)
1701{
1702	int i, j;
1703	u64 *p1, *p2, *p3;
1704
1705	p1 = domain->pt_root;
1706
1707	if (!p1)
1708		return;
1709
1710	for (i = 0; i < 512; ++i) {
1711		if (!IOMMU_PTE_PRESENT(p1[i]))
1712			continue;
1713
1714		p2 = IOMMU_PTE_PAGE(p1[i]);
1715		for (j = 0; j < 512; ++j) {
1716			if (!IOMMU_PTE_PRESENT(p2[j]))
1717				continue;
1718			p3 = IOMMU_PTE_PAGE(p2[j]);
1719			free_page((unsigned long)p3);
1720		}
1721
1722		free_page((unsigned long)p2);
1723	}
1724
1725	free_page((unsigned long)p1);
1726
1727	domain->pt_root = NULL;
1728}
1729
1730static void free_gcr3_tbl_level1(u64 *tbl)
1731{
1732	u64 *ptr;
1733	int i;
1734
1735	for (i = 0; i < 512; ++i) {
1736		if (!(tbl[i] & GCR3_VALID))
1737			continue;
1738
1739		ptr = __va(tbl[i] & PAGE_MASK);
1740
1741		free_page((unsigned long)ptr);
1742	}
1743}
1744
1745static void free_gcr3_tbl_level2(u64 *tbl)
1746{
1747	u64 *ptr;
1748	int i;
1749
1750	for (i = 0; i < 512; ++i) {
1751		if (!(tbl[i] & GCR3_VALID))
1752			continue;
1753
1754		ptr = __va(tbl[i] & PAGE_MASK);
1755
1756		free_gcr3_tbl_level1(ptr);
1757	}
1758}
1759
1760static void free_gcr3_table(struct protection_domain *domain)
1761{
1762	if (domain->glx == 2)
1763		free_gcr3_tbl_level2(domain->gcr3_tbl);
1764	else if (domain->glx == 1)
1765		free_gcr3_tbl_level1(domain->gcr3_tbl);
1766	else if (domain->glx != 0)
1767		BUG();
1768
1769	free_page((unsigned long)domain->gcr3_tbl);
1770}
1771
1772/*
1773 * Free a domain, only used if something went wrong in the
1774 * allocation path and we need to free an already allocated page table
1775 */
1776static void dma_ops_domain_free(struct dma_ops_domain *dom)
1777{
1778	int i;
1779
1780	if (!dom)
1781		return;
1782
1783	del_domain_from_list(&dom->domain);
1784
1785	free_pagetable(&dom->domain);
1786
1787	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1788		if (!dom->aperture[i])
1789			continue;
1790		free_page((unsigned long)dom->aperture[i]->bitmap);
1791		kfree(dom->aperture[i]);
1792	}
1793
1794	kfree(dom);
1795}
1796
1797/*
1798 * Allocates a new protection domain usable for the dma_ops functions.
1799 * It also initializes the page table and the address allocator data
1800 * structures required for the dma_ops interface
1801 */
1802static struct dma_ops_domain *dma_ops_domain_alloc(void)
1803{
1804	struct dma_ops_domain *dma_dom;
1805
1806	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
1807	if (!dma_dom)
1808		return NULL;
1809
1810	spin_lock_init(&dma_dom->domain.lock);
1811
1812	dma_dom->domain.id = domain_id_alloc();
1813	if (dma_dom->domain.id == 0)
1814		goto free_dma_dom;
1815	INIT_LIST_HEAD(&dma_dom->domain.dev_list);
1816	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
1817	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1818	dma_dom->domain.flags = PD_DMA_OPS_MASK;
1819	dma_dom->domain.priv = dma_dom;
1820	if (!dma_dom->domain.pt_root)
1821		goto free_dma_dom;
1822
1823	dma_dom->need_flush = false;
1824	dma_dom->target_dev = 0xffff;
1825
1826	add_domain_to_list(&dma_dom->domain);
1827
1828	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
1829		goto free_dma_dom;
1830
1831	/*
 1832	 * mark the first page as allocated so we never return 0 as
 1833	 * a valid dma-address and can therefore use 0 as the error value
1834	 */
1835	dma_dom->aperture[0]->bitmap[0] = 1;
1836	dma_dom->next_address = 0;
1837
1838
1839	return dma_dom;
1840
1841free_dma_dom:
1842	dma_ops_domain_free(dma_dom);
1843
1844	return NULL;
1845}
1846
1847/*
1848 * little helper function to check whether a given protection domain is a
1849 * dma_ops domain
1850 */
1851static bool dma_ops_domain(struct protection_domain *domain)
1852{
1853	return domain->flags & PD_DMA_OPS_MASK;
1854}
1855
1856static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
1857{
1858	u64 pte_root = 0;
1859	u64 flags = 0;
1860
1861	if (domain->mode != PAGE_MODE_NONE)
1862		pte_root = virt_to_phys(domain->pt_root);
1863
1864	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1865		    << DEV_ENTRY_MODE_SHIFT;
1866	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
1867
1868	flags = amd_iommu_dev_table[devid].data[1];
1869
1870	if (ats)
1871		flags |= DTE_FLAG_IOTLB;
1872
1873	if (domain->flags & PD_IOMMUV2_MASK) {
1874		u64 gcr3 = __pa(domain->gcr3_tbl);
1875		u64 glx  = domain->glx;
1876		u64 tmp;
1877
1878		pte_root |= DTE_FLAG_GV;
1879		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1880
1881		/* First mask out possible old values for GCR3 table */
1882		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1883		flags    &= ~tmp;
1884
1885		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1886		flags    &= ~tmp;
1887
1888		/* Encode GCR3 table into DTE */
1889		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1890		pte_root |= tmp;
1891
1892		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1893		flags    |= tmp;
1894
1895		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1896		flags    |= tmp;
1897	}
1898
1899	flags &= ~(0xffffUL);
1900	flags |= domain->id;
1901
1902	amd_iommu_dev_table[devid].data[1]  = flags;
1903	amd_iommu_dev_table[devid].data[0]  = pte_root;
1904}
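Each GCR3 field update above follows the same read-modify-write idiom: build the field mask, clear the stale bits, then OR in the new value, so no remnant of a previous table pointer survives in the DTE. A tiny standalone sketch of the idiom (shift and width here are illustrative, not the real DTE layout):

/* Sketch: masked field update as used for the GCR3 fields in
 * set_dte_entry(); DEMO_* positions are illustrative only. */
#include <assert.h>
#include <stdint.h>

#define DEMO_FIELD_SHIFT	16
#define DEMO_FIELD_MASK		0xffffULL

static uint64_t demo_set_field(uint64_t word, uint64_t val)
{
	word &= ~(DEMO_FIELD_MASK << DEMO_FIELD_SHIFT);	/* mask out old value */
	word |= (val & DEMO_FIELD_MASK) << DEMO_FIELD_SHIFT;
	return word;
}

int main(void)
{
	uint64_t dte = 0xdeadbeefcafef00dULL;

	dte = demo_set_field(dte, 0x1234);
	assert(((dte >> DEMO_FIELD_SHIFT) & DEMO_FIELD_MASK) == 0x1234);
	return 0;
}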
1905
1906static void clear_dte_entry(u16 devid)
1907{
1908	/* remove entry from the device table seen by the hardware */
1909	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1910	amd_iommu_dev_table[devid].data[1] = 0;
1911
1912	amd_iommu_apply_erratum_63(devid);
1913}
1914
1915static void do_attach(struct iommu_dev_data *dev_data,
1916		      struct protection_domain *domain)
1917{
1918	struct amd_iommu *iommu;
1919	bool ats;
1920
1921	iommu = amd_iommu_rlookup_table[dev_data->devid];
1922	ats   = dev_data->ats.enabled;
1923
1924	/* Update data structures */
1925	dev_data->domain = domain;
1926	list_add(&dev_data->list, &domain->dev_list);
1927	set_dte_entry(dev_data->devid, domain, ats);
1928
1929	/* Do reference counting */
1930	domain->dev_iommu[iommu->index] += 1;
1931	domain->dev_cnt                 += 1;
1932
1933	/* Flush the DTE entry */
1934	device_flush_dte(dev_data);
1935}
1936
1937static void do_detach(struct iommu_dev_data *dev_data)
1938{
1939	struct amd_iommu *iommu;
1940
1941	iommu = amd_iommu_rlookup_table[dev_data->devid];
1942
1943	/* decrease reference counters */
1944	dev_data->domain->dev_iommu[iommu->index] -= 1;
1945	dev_data->domain->dev_cnt                 -= 1;
1946
1947	/* Update data structures */
1948	dev_data->domain = NULL;
1949	list_del(&dev_data->list);
1950	clear_dte_entry(dev_data->devid);
1951
1952	/* Flush the DTE entry */
1953	device_flush_dte(dev_data);
1954}
1955
1956/*
1957 * If a device is not yet associated with a domain, this function
1958 * assigns it to the domain and makes it visible to the hardware
1959 */
1960static int __attach_device(struct iommu_dev_data *dev_data,
1961			   struct protection_domain *domain)
1962{
1963	int ret;
1964
1965	/* lock domain */
1966	spin_lock(&domain->lock);
1967
1968	if (dev_data->alias_data != NULL) {
1969		struct iommu_dev_data *alias_data = dev_data->alias_data;
1970
1971		/* Some sanity checks */
1972		ret = -EBUSY;
1973		if (alias_data->domain != NULL &&
1974				alias_data->domain != domain)
1975			goto out_unlock;
1976
1977		if (dev_data->domain != NULL &&
1978				dev_data->domain != domain)
1979			goto out_unlock;
1980
1981		/* Do real assignment */
1982		if (alias_data->domain == NULL)
1983			do_attach(alias_data, domain);
1984
1985		atomic_inc(&alias_data->bind);
1986	}
1987
1988	if (dev_data->domain == NULL)
1989		do_attach(dev_data, domain);
1990
1991	atomic_inc(&dev_data->bind);
1992
1993	ret = 0;
1994
1995out_unlock:
1996
1997	/* ready */
1998	spin_unlock(&domain->lock);
1999
2000	return ret;
2001}
2002
2003
2004static void pdev_iommuv2_disable(struct pci_dev *pdev)
2005{
2006	pci_disable_ats(pdev);
2007	pci_disable_pri(pdev);
2008	pci_disable_pasid(pdev);
2009}
2010
2011/* FIXME: Change generic reset-function to do the same */
2012static int pri_reset_while_enabled(struct pci_dev *pdev)
2013{
2014	u16 control;
2015	int pos;
2016
2017	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2018	if (!pos)
2019		return -EINVAL;
2020
2021	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
2022	control |= PCI_PRI_CTRL_RESET;
2023	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
2024
2025	return 0;
2026}
2027
2028static int pdev_iommuv2_enable(struct pci_dev *pdev)
2029{
2030	bool reset_enable;
2031	int reqs, ret;
2032
2033	/* FIXME: Hardcode number of outstanding requests for now */
2034	reqs = 32;
2035	if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
2036		reqs = 1;
2037	reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
2038
2039	/* Only allow access to user-accessible pages */
2040	ret = pci_enable_pasid(pdev, 0);
2041	if (ret)
2042		goto out_err;
2043
2044	/* First reset the PRI state of the device */
2045	ret = pci_reset_pri(pdev);
2046	if (ret)
2047		goto out_err;
2048
2049	/* Enable PRI */
2050	ret = pci_enable_pri(pdev, reqs);
2051	if (ret)
2052		goto out_err;
2053
2054	if (reset_enable) {
2055		ret = pri_reset_while_enabled(pdev);
2056		if (ret)
2057			goto out_err;
2058	}
2059
2060	ret = pci_enable_ats(pdev, PAGE_SHIFT);
2061	if (ret)
2062		goto out_err;
2063
2064	return 0;
2065
2066out_err:
2067	pci_disable_pri(pdev);
2068	pci_disable_pasid(pdev);
2069
2070	return ret;
2071}
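
/*
 * Note the ordering above: enable is PASID -> PRI -> ATS, while
 * pdev_iommuv2_disable() tears the features down in the reverse
 * order ATS -> PRI -> PASID, so ATS is never left active while PRI
 * or PASID are only half configured.
 */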
2072
2073/* FIXME: Move this to PCI code */
2074#define PCI_PRI_TLP_OFF		(1 << 15)
2075
2076bool pci_pri_tlp_required(struct pci_dev *pdev)
2077{
2078	u16 status;
2079	int pos;
2080
2081	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
2082	if (!pos)
2083		return false;
2084
2085	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
2086
2087	return (status & PCI_PRI_TLP_OFF) ? true : false;
2088}
2089
2090/*
2091 * If a device is not yet associated with a domain, this function
2092 * assigns it to the domain and makes it visible to the hardware
2093 */
2094static int attach_device(struct device *dev,
2095			 struct protection_domain *domain)
2096{
2097	struct pci_dev *pdev = to_pci_dev(dev);
2098	struct iommu_dev_data *dev_data;
2099	unsigned long flags;
2100	int ret;
2101
2102	dev_data = get_dev_data(dev);
2103
2104	if (domain->flags & PD_IOMMUV2_MASK) {
2105		if (!dev_data->iommu_v2 || !dev_data->passthrough)
2106			return -EINVAL;
2107
2108		if (pdev_iommuv2_enable(pdev) != 0)
2109			return -EINVAL;
2110
2111		dev_data->ats.enabled = true;
2112		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2113		dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
2114	} else if (amd_iommu_iotlb_sup &&
2115		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2116		dev_data->ats.enabled = true;
2117		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
2118	}
2119
2120	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2121	ret = __attach_device(dev_data, domain);
2122	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2123
2124	/*
2125	 * We might boot into a crash-kernel here. The crashed kernel
2126	 * left the IOMMU caches dirty, so we have to flush here
2127	 * to evict all stale entries.
2128	 */
2129	domain_flush_tlb_pde(domain);
2130
2131	return ret;
2132}
2133
2134/*
2135 * Removes a device from a protection domain (unlocked)
2136 */
2137static void __detach_device(struct iommu_dev_data *dev_data)
2138{
2139	struct protection_domain *domain;
2140	unsigned long flags;
2141
2142	BUG_ON(!dev_data->domain);
2143
2144	domain = dev_data->domain;
2145
2146	spin_lock_irqsave(&domain->lock, flags);
2147
2148	if (dev_data->alias_data != NULL) {
2149		struct iommu_dev_data *alias_data = dev_data->alias_data;
2150
2151		if (atomic_dec_and_test(&alias_data->bind))
2152			do_detach(alias_data);
2153	}
2154
2155	if (atomic_dec_and_test(&dev_data->bind))
2156		do_detach(dev_data);
2157
2158	spin_unlock_irqrestore(&domain->lock, flags);
2159
2160	/*
2161	 * If we run in passthrough mode the device must be assigned to the
2162	 * passthrough domain if it is detached from any other domain.
2163	 * Make sure we can deassign from the pt_domain itself.
2164	 */
2165	if (dev_data->passthrough &&
2166	    (dev_data->domain == NULL && domain != pt_domain))
2167		__attach_device(dev_data, pt_domain);
2168}
2169
2170/*
2171 * Removes a device from a protection domain (with devtable_lock held)
2172 */
2173static void detach_device(struct device *dev)
2174{
2175	struct protection_domain *domain;
2176	struct iommu_dev_data *dev_data;
2177	unsigned long flags;
2178
2179	dev_data = get_dev_data(dev);
2180	domain   = dev_data->domain;
2181
2182	/* lock device table */
2183	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2184	__detach_device(dev_data);
2185	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2186
2187	if (domain->flags & PD_IOMMUV2_MASK)
2188		pdev_iommuv2_disable(to_pci_dev(dev));
2189	else if (dev_data->ats.enabled)
2190		pci_disable_ats(to_pci_dev(dev));
2191
2192	dev_data->ats.enabled = false;
2193}
2194
2195/*
2196 * Find out the protection domain structure for a given PCI device. This
2197 * will give us the pointer to the page table root for example.
2198 */
2199static struct protection_domain *domain_for_device(struct device *dev)
2200{
2201	struct iommu_dev_data *dev_data;
2202	struct protection_domain *dom = NULL;
2203	unsigned long flags;
2204
2205	dev_data   = get_dev_data(dev);
2206
2207	if (dev_data->domain)
2208		return dev_data->domain;
2209
2210	if (dev_data->alias_data != NULL) {
2211		struct iommu_dev_data *alias_data = dev_data->alias_data;
2212
2213		read_lock_irqsave(&amd_iommu_devtable_lock, flags);
2214		if (alias_data->domain != NULL) {
2215			__attach_device(dev_data, alias_data->domain);
2216			dom = alias_data->domain;
2217		}
2218		read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2219	}
2220
2221	return dom;
2222}
2223
2224static int device_change_notifier(struct notifier_block *nb,
2225				  unsigned long action, void *data)
2226{
2227	struct dma_ops_domain *dma_domain;
2228	struct protection_domain *domain;
2229	struct iommu_dev_data *dev_data;
2230	struct device *dev = data;
2231	struct amd_iommu *iommu;
2232	unsigned long flags;
2233	u16 devid;
2234
2235	if (!check_device(dev))
2236		return 0;
2237
2238	devid    = get_device_id(dev);
2239	iommu    = amd_iommu_rlookup_table[devid];
2240	dev_data = get_dev_data(dev);
2241
2242	switch (action) {
2243	case BUS_NOTIFY_UNBOUND_DRIVER:
2244
2245		domain = domain_for_device(dev);
2246
2247		if (!domain)
2248			goto out;
2249		if (dev_data->passthrough)
2250			break;
2251		detach_device(dev);
2252		break;
2253	case BUS_NOTIFY_ADD_DEVICE:
2254
2255		iommu_init_device(dev);
2256
2257		/*
2258		 * dev_data was NULL before this call and has just
2259		 * been initialized in iommu_init_device
2260		 */
2261		dev_data = get_dev_data(dev);
2262
2263		if (iommu_pass_through || dev_data->iommu_v2) {
2264			dev_data->passthrough = true;
2265			attach_device(dev, pt_domain);
2266			break;
2267		}
2268
2269		domain = domain_for_device(dev);
2270
2271		/* allocate a protection domain if a device is added */
2272		dma_domain = find_protection_domain(devid);
2273		if (dma_domain)
2274			goto out;
2275		dma_domain = dma_ops_domain_alloc();
2276		if (!dma_domain)
2277			goto out;
2278		dma_domain->target_dev = devid;
2279
2280		spin_lock_irqsave(&iommu_pd_list_lock, flags);
2281		list_add_tail(&dma_domain->list, &iommu_pd_list);
2282		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
2283
2284		dev_data = get_dev_data(dev);
2285
2286		dev->archdata.dma_ops = &amd_iommu_dma_ops;
2287
2288		break;
2289	case BUS_NOTIFY_DEL_DEVICE:
2290
2291		iommu_uninit_device(dev);
2292
2293	default:
2294		goto out;
2295	}
2296
2297	iommu_completion_wait(iommu);
2298
2299out:
2300	return 0;
2301}
2302
2303static struct notifier_block device_nb = {
2304	.notifier_call = device_change_notifier,
2305};
2306
2307void amd_iommu_init_notifier(void)
2308{
2309	bus_register_notifier(&pci_bus_type, &device_nb);
2310}
2311
2312/*****************************************************************************
2313 *
2314 * The next functions belong to the dma_ops mapping/unmapping code.
2315 *
2316 *****************************************************************************/
2317
2318/*
2319 * In the dma_ops path we only have the struct device. This function
2320 * finds the corresponding IOMMU, the protection domain and the
2321 * requestor id for a given device.
2322 * If the device is not yet associated with a domain, this is also done
2323 * in this function.
2324 */
2325static struct protection_domain *get_domain(struct device *dev)
2326{
2327	struct protection_domain *domain;
2328	struct dma_ops_domain *dma_dom;
2329	u16 devid = get_device_id(dev);
2330
2331	if (!check_device(dev))
2332		return ERR_PTR(-EINVAL);
2333
2334	domain = domain_for_device(dev);
2335	if (domain != NULL && !dma_ops_domain(domain))
2336		return ERR_PTR(-EBUSY);
2337
2338	if (domain != NULL)
2339		return domain;
2340
2341	/* Device not bound yet - bind it */
2342	dma_dom = find_protection_domain(devid);
2343	if (!dma_dom)
2344		dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
2345	attach_device(dev, &dma_dom->domain);
2346	DUMP_printk("Using protection domain %d for device %s\n",
2347		    dma_dom->domain.id, dev_name(dev));
2348
2349	return &dma_dom->domain;
2350}
2351
2352static void update_device_table(struct protection_domain *domain)
2353{
2354	struct iommu_dev_data *dev_data;
2355
2356	list_for_each_entry(dev_data, &domain->dev_list, list)
2357		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
2358}
2359
2360static void update_domain(struct protection_domain *domain)
2361{
2362	if (!domain->updated)
2363		return;
2364
2365	update_device_table(domain);
2366
2367	domain_flush_devices(domain);
2368	domain_flush_tlb_pde(domain);
2369
2370	domain->updated = false;
2371}
2372
2373/*
2374 * This function fetches the PTE for a given address in the aperture
2375 */
2376static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
2377			    unsigned long address)
2378{
2379	struct aperture_range *aperture;
2380	u64 *pte, *pte_page;
2381
2382	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2383	if (!aperture)
2384		return NULL;
2385
2386	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2387	if (!pte) {
2388		pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
2389				GFP_ATOMIC);
2390		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
2391	} else
2392		pte += PM_LEVEL_INDEX(0, address);
2393
2394	update_domain(&dom->domain);
2395
2396	return pte;
2397}
2398
2399/*
2400 * This is the generic map function. It maps one 4kb page at paddr to
2401 * the given address in the DMA address space for the domain.
2402 */
2403static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
2404				     unsigned long address,
2405				     phys_addr_t paddr,
2406				     int direction)
2407{
2408	u64 *pte, __pte;
2409
2410	WARN_ON(address > dom->aperture_size);
2411
2412	paddr &= PAGE_MASK;
2413
2414	pte  = dma_ops_get_pte(dom, address);
2415	if (!pte)
2416		return DMA_ERROR_CODE;
2417
2418	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
2419
2420	if (direction == DMA_TO_DEVICE)
2421		__pte |= IOMMU_PTE_IR;
2422	else if (direction == DMA_FROM_DEVICE)
2423		__pte |= IOMMU_PTE_IW;
2424	else if (direction == DMA_BIDIRECTIONAL)
2425		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
2426
2427	WARN_ON(*pte);
2428
2429	*pte = __pte;
2430
2431	return (dma_addr_t)address;
2432}
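
/*
 * Worked example for the permission bits above (illustration only):
 * with DMA_TO_DEVICE the device only issues read requests against
 * the buffer, so
 *
 *	dma_ops_domain_map(dom, address, paddr, DMA_TO_DEVICE);
 *
 * installs paddr | IOMMU_PTE_P | IOMMU_PTE_FC | IOMMU_PTE_IR, while
 * DMA_FROM_DEVICE gets IOMMU_PTE_IW and DMA_BIDIRECTIONAL gets both.
 */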
2433
2434/*
2435 * The generic unmapping function for one page in the DMA address space.
2436 */
2437static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
2438				 unsigned long address)
2439{
2440	struct aperture_range *aperture;
2441	u64 *pte;
2442
2443	if (address >= dom->aperture_size)
2444		return;
2445
2446	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2447	if (!aperture)
2448		return;
2449
2450	pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2451	if (!pte)
2452		return;
2453
2454	pte += PM_LEVEL_INDEX(0, address);
2455
2456	WARN_ON(!*pte);
2457
2458	*pte = 0ULL;
2459}
2460
2461/*
2462 * This function contains common code for mapping of a physically
2463 * contiguous memory region into DMA address space. It is used by all
2464 * mapping functions provided with this IOMMU driver.
2465 * Must be called with the domain lock held.
2466 */
2467static dma_addr_t __map_single(struct device *dev,
2468			       struct dma_ops_domain *dma_dom,
2469			       phys_addr_t paddr,
2470			       size_t size,
2471			       int dir,
2472			       bool align,
2473			       u64 dma_mask)
2474{
2475	dma_addr_t offset = paddr & ~PAGE_MASK;
2476	dma_addr_t address, start, ret;
2477	unsigned int pages;
2478	unsigned long align_mask = 0;
2479	int i;
2480
2481	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
2482	paddr &= PAGE_MASK;
2483
2484	INC_STATS_COUNTER(total_map_requests);
2485
2486	if (pages > 1)
2487		INC_STATS_COUNTER(cross_page);
2488
2489	if (align)
2490		align_mask = (1UL << get_order(size)) - 1;
2491
2492retry:
2493	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
2494					  dma_mask);
2495	if (unlikely(address == DMA_ERROR_CODE)) {
2496		/*
2497		 * setting next_address here will let the address
2498		 * allocator only scan the new allocated range in the
2499		 * first run. This is a small optimization.
2500		 */
2501		dma_dom->next_address = dma_dom->aperture_size;
2502
2503		if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
2504			goto out;
2505
2506		/*
2507		 * aperture was successfully enlarged by 128 MB, try
2508		 * allocation again
2509		 */
2510		goto retry;
2511	}
2512
2513	start = address;
2514	for (i = 0; i < pages; ++i) {
2515		ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
2516		if (ret == DMA_ERROR_CODE)
2517			goto out_unmap;
2518
2519		paddr += PAGE_SIZE;
2520		start += PAGE_SIZE;
2521	}
2522	address += offset;
2523
2524	ADD_STATS_COUNTER(alloced_io_mem, size);
2525
2526	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
2527		domain_flush_tlb(&dma_dom->domain);
2528		dma_dom->need_flush = false;
2529	} else if (unlikely(amd_iommu_np_cache))
2530		domain_flush_pages(&dma_dom->domain, address, size);
2531
2532out:
2533	return address;
2534
2535out_unmap:
2536
2537	for (--i; i >= 0; --i) {
2538		start -= PAGE_SIZE;
2539		dma_ops_domain_unmap(dma_dom, start);
2540	}
2541
2542	dma_ops_free_addresses(dma_dom, address, pages);
2543
2544	return DMA_ERROR_CODE;
2545}
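
/*
 * Worked example (illustration only): paddr = 0x1ff0 and size = 0x2010
 * cover the bytes 0x1ff0..0x3fff, i.e. the three 4k pages at 0x1000,
 * 0x2000 and 0x3000. iommu_num_pages() therefore returns 3, and the
 * returned dma_addr keeps the sub-page offset 0xff0.
 */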
2546
2547/*
2548 * Does the reverse of the __map_single function. Must be called with
2549 * the domain lock held too
2550 */
2551static void __unmap_single(struct dma_ops_domain *dma_dom,
2552			   dma_addr_t dma_addr,
2553			   size_t size,
2554			   int dir)
2555{
2556	dma_addr_t flush_addr;
2557	dma_addr_t i, start;
2558	unsigned int pages;
2559
2560	if ((dma_addr == DMA_ERROR_CODE) ||
2561	    (dma_addr + size > dma_dom->aperture_size))
2562		return;
2563
2564	flush_addr = dma_addr;
2565	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
2566	dma_addr &= PAGE_MASK;
2567	start = dma_addr;
2568
2569	for (i = 0; i < pages; ++i) {
2570		dma_ops_domain_unmap(dma_dom, start);
2571		start += PAGE_SIZE;
2572	}
2573
2574	SUB_STATS_COUNTER(alloced_io_mem, size);
2575
2576	dma_ops_free_addresses(dma_dom, dma_addr, pages);
2577
2578	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
2579		domain_flush_pages(&dma_dom->domain, flush_addr, size);
2580		dma_dom->need_flush = false;
2581	}
2582}
2583
2584/*
2585 * The exported map_single function for dma_ops.
2586 */
2587static dma_addr_t map_page(struct device *dev, struct page *page,
2588			   unsigned long offset, size_t size,
2589			   enum dma_data_direction dir,
2590			   struct dma_attrs *attrs)
2591{
2592	unsigned long flags;
2593	struct protection_domain *domain;
2594	dma_addr_t addr;
2595	u64 dma_mask;
2596	phys_addr_t paddr = page_to_phys(page) + offset;
2597
2598	INC_STATS_COUNTER(cnt_map_single);
2599
2600	domain = get_domain(dev);
2601	if (PTR_ERR(domain) == -EINVAL)
2602		return (dma_addr_t)paddr;
2603	else if (IS_ERR(domain))
2604		return DMA_ERROR_CODE;
2605
2606	dma_mask = *dev->dma_mask;
2607
2608	spin_lock_irqsave(&domain->lock, flags);
2609
2610	addr = __map_single(dev, domain->priv, paddr, size, dir, false,
2611			    dma_mask);
2612	if (addr == DMA_ERROR_CODE)
2613		goto out;
2614
2615	domain_flush_complete(domain);
2616
2617out:
2618	spin_unlock_irqrestore(&domain->lock, flags);
2619
2620	return addr;
2621}
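
/*
 * Driver-side sketch of the path into map_page() above, using the
 * generic DMA API (the device and error handling are illustrative):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
 *			      DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(&pdev->dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 */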
2622
2623/*
2624 * The exported unmap_single function for dma_ops.
2625 */
2626static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2627		       enum dma_data_direction dir, struct dma_attrs *attrs)
2628{
2629	unsigned long flags;
2630	struct protection_domain *domain;
2631
2632	INC_STATS_COUNTER(cnt_unmap_single);
2633
2634	domain = get_domain(dev);
2635	if (IS_ERR(domain))
2636		return;
2637
2638	spin_lock_irqsave(&domain->lock, flags);
2639
2640	__unmap_single(domain->priv, dma_addr, size, dir);
2641
2642	domain_flush_complete(domain);
2643
2644	spin_unlock_irqrestore(&domain->lock, flags);
2645}
2646
2647/*
2648 * This is a special map_sg function which is used when we have to map
2649 * a device that is not handled by an AMD IOMMU in the system.
2650 */
2651static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
2652			   int nelems, int dir)
2653{
2654	struct scatterlist *s;
2655	int i;
2656
2657	for_each_sg(sglist, s, nelems, i) {
2658		s->dma_address = (dma_addr_t)sg_phys(s);
2659		s->dma_length  = s->length;
2660	}
2661
2662	return nelems;
2663}
2664
2665/*
2666 * The exported map_sg function for dma_ops (handles scatter-gather
2667 * lists).
2668 */
2669static int map_sg(struct device *dev, struct scatterlist *sglist,
2670		  int nelems, enum dma_data_direction dir,
2671		  struct dma_attrs *attrs)
2672{
2673	unsigned long flags;
2674	struct protection_domain *domain;
2675	int i;
2676	struct scatterlist *s;
2677	phys_addr_t paddr;
2678	int mapped_elems = 0;
2679	u64 dma_mask;
2680
2681	INC_STATS_COUNTER(cnt_map_sg);
2682
2683	domain = get_domain(dev);
2684	if (PTR_ERR(domain) == -EINVAL)
2685		return map_sg_no_iommu(dev, sglist, nelems, dir);
2686	else if (IS_ERR(domain))
2687		return 0;
2688
2689	dma_mask = *dev->dma_mask;
2690
2691	spin_lock_irqsave(&domain->lock, flags);
2692
2693	for_each_sg(sglist, s, nelems, i) {
2694		paddr = sg_phys(s);
2695
2696		s->dma_address = __map_single(dev, domain->priv,
2697					      paddr, s->length, dir, false,
2698					      dma_mask);
2699
2700		if (s->dma_address) {
2701			s->dma_length = s->length;
2702			mapped_elems++;
2703		} else
2704			goto unmap;
2705	}
2706
2707	domain_flush_complete(domain);
2708
2709out:
2710	spin_unlock_irqrestore(&domain->lock, flags);
2711
2712	return mapped_elems;
2713unmap:
2714	for_each_sg(sglist, s, mapped_elems, i) {
2715		if (s->dma_address)
2716			__unmap_single(domain->priv, s->dma_address,
2717				       s->dma_length, dir);
2718		s->dma_address = s->dma_length = 0;
2719	}
2720
2721	mapped_elems = 0;
2722
2723	goto out;
2724}
2725
2726/*
2727 * The exported unmap_sg function for dma_ops (handles scatter-gather
2728 * lists).
2729 */
2730static void unmap_sg(struct device *dev, struct scatterlist *sglist,
2731		     int nelems, enum dma_data_direction dir,
2732		     struct dma_attrs *attrs)
2733{
2734	unsigned long flags;
2735	struct protection_domain *domain;
2736	struct scatterlist *s;
2737	int i;
2738
2739	INC_STATS_COUNTER(cnt_unmap_sg);
2740
2741	domain = get_domain(dev);
2742	if (IS_ERR(domain))
2743		return;
2744
2745	spin_lock_irqsave(&domain->lock, flags);
2746
2747	for_each_sg(sglist, s, nelems, i) {
2748		__unmap_single(domain->priv, s->dma_address,
2749			       s->dma_length, dir);
2750		s->dma_address = s->dma_length = 0;
2751	}
2752
2753	domain_flush_complete(domain);
2754
2755	spin_unlock_irqrestore(&domain->lock, flags);
2756}
2757
2758/*
2759 * The exported alloc_coherent function for dma_ops.
2760 */
2761static void *alloc_coherent(struct device *dev, size_t size,
2762			    dma_addr_t *dma_addr, gfp_t flag,
2763			    struct dma_attrs *attrs)
2764{
2765	unsigned long flags;
2766	void *virt_addr;
2767	struct protection_domain *domain;
2768	phys_addr_t paddr;
2769	u64 dma_mask = dev->coherent_dma_mask;
2770
2771	INC_STATS_COUNTER(cnt_alloc_coherent);
2772
2773	domain = get_domain(dev);
2774	if (PTR_ERR(domain) == -EINVAL) {
2775		virt_addr = (void *)__get_free_pages(flag, get_order(size));
2776		*dma_addr = __pa(virt_addr);
2777		return virt_addr;
2778	} else if (IS_ERR(domain))
2779		return NULL;
2780
2781	dma_mask  = dev->coherent_dma_mask;
2782	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2783	flag     |= __GFP_ZERO;
2784
2785	virt_addr = (void *)__get_free_pages(flag, get_order(size));
2786	if (!virt_addr)
2787		return NULL;
2788
2789	paddr = virt_to_phys(virt_addr);
2790
2791	if (!dma_mask)
2792		dma_mask = *dev->dma_mask;
2793
2794	spin_lock_irqsave(&domain->lock, flags);
2795
2796	*dma_addr = __map_single(dev, domain->priv, paddr,
2797				 size, DMA_BIDIRECTIONAL, true, dma_mask);
2798
2799	if (*dma_addr == DMA_ERROR_CODE) {
2800		spin_unlock_irqrestore(&domain->lock, flags);
2801		goto out_free;
2802	}
2803
2804	domain_flush_complete(domain);
2805
2806	spin_unlock_irqrestore(&domain->lock, flags);
2807
2808	return virt_addr;
2809
2810out_free:
2811
2812	free_pages((unsigned long)virt_addr, get_order(size));
2813
2814	return NULL;
2815}
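
/*
 * Driver-side sketch of the coherent path above (hypothetical device,
 * illustration only):
 *
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	cpu = dma_alloc_coherent(&pdev->dev, 4096, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... hand 'dma' to the device, use 'cpu' from the CPU side ...
 *	dma_free_coherent(&pdev->dev, 4096, cpu, dma);
 */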
2816
2817/*
2818 * The exported free_coherent function for dma_ops.
2819 */
2820static void free_coherent(struct device *dev, size_t size,
2821			  void *virt_addr, dma_addr_t dma_addr,
2822			  struct dma_attrs *attrs)
2823{
2824	unsigned long flags;
2825	struct protection_domain *domain;
2826
2827	INC_STATS_COUNTER(cnt_free_coherent);
2828
2829	domain = get_domain(dev);
2830	if (IS_ERR(domain))
2831		goto free_mem;
2832
2833	spin_lock_irqsave(&domain->lock, flags);
2834
2835	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
2836
2837	domain_flush_complete(domain);
2838
2839	spin_unlock_irqrestore(&domain->lock, flags);
2840
2841free_mem:
2842	free_pages((unsigned long)virt_addr, get_order(size));
2843}
2844
2845/*
2846 * This function is called by the DMA layer to find out if we can handle a
2847 * particular device. It is part of the dma_ops.
2848 */
2849static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2850{
2851	return check_device(dev);
2852}
2853
2854/*
2855 * The function for pre-allocating protection domains.
2856 *
2857 * Once the driver core informs the DMA layer when a driver grabs a
2858 * device, we won't need to preallocate the protection domains anymore.
2859 * For now we have to.
2860 */
2861static void __init prealloc_protection_domains(void)
2862{
2863	struct iommu_dev_data *dev_data;
2864	struct dma_ops_domain *dma_dom;
2865	struct pci_dev *dev = NULL;
2866	u16 devid;
2867
2868	for_each_pci_dev(dev) {
2869
2870		/* Do we handle this device? */
2871		if (!check_device(&dev->dev))
2872			continue;
2873
2874		dev_data = get_dev_data(&dev->dev);
2875		if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
2876			/* Make sure passthrough domain is allocated */
2877			alloc_passthrough_domain();
2878			dev_data->passthrough = true;
2879			attach_device(&dev->dev, pt_domain);
2880			pr_info("AMD-Vi: Using passthrough domain for device %s\n",
2881				dev_name(&dev->dev));
2882		}
2883
2884		/* Is there already any domain for it? */
2885		if (domain_for_device(&dev->dev))
2886			continue;
2887
2888		devid = get_device_id(&dev->dev);
2889
2890		dma_dom = dma_ops_domain_alloc();
2891		if (!dma_dom)
2892			continue;
2893		init_unity_mappings_for_device(dma_dom, devid);
2894		dma_dom->target_dev = devid;
2895
2896		attach_device(&dev->dev, &dma_dom->domain);
2897
2898		list_add_tail(&dma_dom->list, &iommu_pd_list);
2899	}
2900}
2901
2902static struct dma_map_ops amd_iommu_dma_ops = {
2903	.alloc = alloc_coherent,
2904	.free = free_coherent,
2905	.map_page = map_page,
2906	.unmap_page = unmap_page,
2907	.map_sg = map_sg,
2908	.unmap_sg = unmap_sg,
2909	.dma_supported = amd_iommu_dma_supported,
2910};
2911
2912static unsigned device_dma_ops_init(void)
2913{
2914	struct iommu_dev_data *dev_data;
2915	struct pci_dev *pdev = NULL;
2916	unsigned unhandled = 0;
2917
2918	for_each_pci_dev(pdev) {
2919		if (!check_device(&pdev->dev)) {
2920
2921			iommu_ignore_device(&pdev->dev);
2922
2923			unhandled += 1;
2924			continue;
2925		}
2926
2927		dev_data = get_dev_data(&pdev->dev);
2928
2929		if (!dev_data->passthrough)
2930			pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
2931		else
2932			pdev->dev.archdata.dma_ops = &nommu_dma_ops;
2933	}
2934
2935	return unhandled;
2936}
2937
2938/*
2939 * The function which glues the AMD IOMMU driver into dma_ops.
2940 */
2941
2942void __init amd_iommu_init_api(void)
2943{
2944	bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
2945}
2946
2947int __init amd_iommu_init_dma_ops(void)
2948{
2949	struct amd_iommu *iommu;
2950	int ret, unhandled;
2951
2952	/*
2953	 * first allocate a default protection domain for every IOMMU we
2954	 * found in the system. Devices not assigned to any other
2955	 * protection domain will be assigned to the default one.
2956	 */
2957	for_each_iommu(iommu) {
2958		iommu->default_dom = dma_ops_domain_alloc();
2959		if (iommu->default_dom == NULL)
2960			return -ENOMEM;
2961		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
2962		ret = iommu_init_unity_mappings(iommu);
2963		if (ret)
2964			goto free_domains;
2965	}
2966
2967	/*
2968	 * Pre-allocate the protection domains for each device.
2969	 */
2970	prealloc_protection_domains();
2971
2972	iommu_detected = 1;
2973	swiotlb = 0;
2974
2975	/* Make the driver finally visible to the drivers */
2976	unhandled = device_dma_ops_init();
2977	if (unhandled && max_pfn > MAX_DMA32_PFN) {
2978		/* There are unhandled devices - initialize swiotlb for them */
2979		swiotlb = 1;
2980	}
2981
2982	amd_iommu_stats_init();
2983
2984	return 0;
2985
2986free_domains:
2987
2988	for_each_iommu(iommu) {
2989		if (iommu->default_dom)
2990			dma_ops_domain_free(iommu->default_dom);
2991	}
2992
2993	return ret;
2994}
2995
2996/*****************************************************************************
2997 *
2998 * The following functions belong to the exported interface of AMD IOMMU
2999 *
3000 * This interface allows access to lower level functions of the IOMMU
3001 * like protection domain handling and assignment of devices to domains
3002 * which is not possible with the dma_ops interface.
3003 *
3004 *****************************************************************************/
3005
3006static void cleanup_domain(struct protection_domain *domain)
3007{
3008	struct iommu_dev_data *dev_data, *next;
3009	unsigned long flags;
3010
3011	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3012
3013	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
3014		__detach_device(dev_data);
3015		atomic_set(&dev_data->bind, 0);
3016	}
3017
3018	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3019}
3020
3021static void protection_domain_free(struct protection_domain *domain)
3022{
3023	if (!domain)
3024		return;
3025
3026	del_domain_from_list(domain);
3027
3028	if (domain->id)
3029		domain_id_free(domain->id);
3030
3031	kfree(domain);
3032}
3033
3034static struct protection_domain *protection_domain_alloc(void)
3035{
3036	struct protection_domain *domain;
3037
3038	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
3039	if (!domain)
3040		return NULL;
3041
3042	spin_lock_init(&domain->lock);
3043	mutex_init(&domain->api_lock);
3044	domain->id = domain_id_alloc();
3045	if (!domain->id)
3046		goto out_err;
3047	INIT_LIST_HEAD(&domain->dev_list);
3048
3049	add_domain_to_list(domain);
3050
3051	return domain;
3052
3053out_err:
3054	kfree(domain);
3055
3056	return NULL;
3057}
3058
3059static int __init alloc_passthrough_domain(void)
3060{
3061	if (pt_domain != NULL)
3062		return 0;
3063
3064	/* allocate passthrough domain */
3065	pt_domain = protection_domain_alloc();
3066	if (!pt_domain)
3067		return -ENOMEM;
3068
3069	pt_domain->mode = PAGE_MODE_NONE;
3070
3071	return 0;
3072}
3073static int amd_iommu_domain_init(struct iommu_domain *dom)
3074{
3075	struct protection_domain *domain;
3076
3077	domain = protection_domain_alloc();
3078	if (!domain)
3079		goto out_free;
3080
3081	domain->mode    = PAGE_MODE_3_LEVEL;
3082	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
3083	if (!domain->pt_root)
3084		goto out_free;
3085
3086	domain->iommu_domain = dom;
3087
3088	dom->priv = domain;
3089
3090	return 0;
3091
3092out_free:
3093	protection_domain_free(domain);
3094
3095	return -ENOMEM;
3096}
3097
3098static void amd_iommu_domain_destroy(struct iommu_domain *dom)
3099{
3100	struct protection_domain *domain = dom->priv;
3101
3102	if (!domain)
3103		return;
3104
3105	if (domain->dev_cnt > 0)
3106		cleanup_domain(domain);
3107
3108	BUG_ON(domain->dev_cnt != 0);
3109
3110	if (domain->mode != PAGE_MODE_NONE)
3111		free_pagetable(domain);
3112
3113	if (domain->flags & PD_IOMMUV2_MASK)
3114		free_gcr3_table(domain);
3115
3116	protection_domain_free(domain);
3117
3118	dom->priv = NULL;
3119}
3120
3121static void amd_iommu_detach_device(struct iommu_domain *dom,
3122				    struct device *dev)
3123{
3124	struct iommu_dev_data *dev_data = dev->archdata.iommu;
3125	struct amd_iommu *iommu;
3126	u16 devid;
3127
3128	if (!check_device(dev))
3129		return;
3130
3131	devid = get_device_id(dev);
3132
3133	if (dev_data->domain != NULL)
3134		detach_device(dev);
3135
3136	iommu = amd_iommu_rlookup_table[devid];
3137	if (!iommu)
3138		return;
3139
3140	iommu_completion_wait(iommu);
3141}
3142
3143static int amd_iommu_attach_device(struct iommu_domain *dom,
3144				   struct device *dev)
3145{
3146	struct protection_domain *domain = dom->priv;
3147	struct iommu_dev_data *dev_data;
3148	struct amd_iommu *iommu;
3149	int ret;
3150
3151	if (!check_device(dev))
3152		return -EINVAL;
3153
3154	dev_data = dev->archdata.iommu;
3155
3156	iommu = amd_iommu_rlookup_table[dev_data->devid];
3157	if (!iommu)
3158		return -EINVAL;
3159
3160	if (dev_data->domain)
3161		detach_device(dev);
3162
3163	ret = attach_device(dev, domain);
3164
3165	iommu_completion_wait(iommu);
3166
3167	return ret;
3168}
3169
3170static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
3171			 phys_addr_t paddr, size_t page_size, int iommu_prot)
3172{
3173	struct protection_domain *domain = dom->priv;
3174	int prot = 0;
3175	int ret;
3176
3177	if (domain->mode == PAGE_MODE_NONE)
3178		return -EINVAL;
3179
3180	if (iommu_prot & IOMMU_READ)
3181		prot |= IOMMU_PROT_IR;
3182	if (iommu_prot & IOMMU_WRITE)
3183		prot |= IOMMU_PROT_IW;
3184
3185	mutex_lock(&domain->api_lock);
3186	ret = iommu_map_page(domain, iova, paddr, prot, page_size);
3187	mutex_unlock(&domain->api_lock);
3188
3189	return ret;
3190}
3191
3192static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3193			   size_t page_size)
3194{
3195	struct protection_domain *domain = dom->priv;
3196	size_t unmap_size;
3197
3198	if (domain->mode == PAGE_MODE_NONE)
3199		return 0;	/* size_t return type cannot carry -EINVAL */
3200
3201	mutex_lock(&domain->api_lock);
3202	unmap_size = iommu_unmap_page(domain, iova, page_size);
3203	mutex_unlock(&domain->api_lock);
3204
3205	domain_flush_tlb_pde(domain);
3206
3207	return unmap_size;
3208}
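
/*
 * The two callbacks above back the generic IOMMU API; a minimal sketch
 * of a consumer (device assignment, for instance) driving them:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map(dom, iova, paddr, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, PAGE_SIZE);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */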
3209
3210static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
3211					  unsigned long iova)
3212{
3213	struct protection_domain *domain = dom->priv;
3214	unsigned long offset_mask;
3215	phys_addr_t paddr;
3216	u64 *pte, __pte;
3217
3218	if (domain->mode == PAGE_MODE_NONE)
3219		return iova;
3220
3221	pte = fetch_pte(domain, iova);
3222
3223	if (!pte || !IOMMU_PTE_PRESENT(*pte))
3224		return 0;
3225
3226	if (PM_PTE_LEVEL(*pte) == 0)
3227		offset_mask = PAGE_SIZE - 1;
3228	else
3229		offset_mask = PTE_PAGE_SIZE(*pte) - 1;
3230
3231	__pte = *pte & PM_ADDR_MASK;
3232	paddr = (__pte & ~offset_mask) | (iova & offset_mask);
3233
3234	return paddr;
3235}
3236
3237static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
3238				    unsigned long cap)
3239{
3240	switch (cap) {
3241	case IOMMU_CAP_CACHE_COHERENCY:
3242		return 1;
3243	}
3244
3245	return 0;
3246}
3247
3248static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
3249{
3250	struct iommu_dev_data *dev_data = dev->archdata.iommu;
3251	struct pci_dev *pdev = to_pci_dev(dev);
3252	u16 devid;
3253
3254	if (!dev_data)
3255		return -ENODEV;
3256
3257	if (pdev->is_virtfn || !iommu_group_mf)
3258		devid = dev_data->devid;
3259	else
3260		devid = calc_devid(pdev->bus->number,
3261				   PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
3262
3263	*groupid = amd_iommu_alias_table[devid];
3264
3265	return 0;
3266}
3267
3268static struct iommu_ops amd_iommu_ops = {
3269	.domain_init = amd_iommu_domain_init,
3270	.domain_destroy = amd_iommu_domain_destroy,
3271	.attach_dev = amd_iommu_attach_device,
3272	.detach_dev = amd_iommu_detach_device,
3273	.map = amd_iommu_map,
3274	.unmap = amd_iommu_unmap,
3275	.iova_to_phys = amd_iommu_iova_to_phys,
3276	.domain_has_cap = amd_iommu_domain_has_cap,
3277	.device_group = amd_iommu_device_group,
3278	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
3279};
3280
3281/*****************************************************************************
3282 *
3283 * The next functions do a basic initialization of the IOMMU for
3284 * passthrough mode
3285 *
3286 * In passthrough mode the IOMMU is initialized and enabled but not used for
3287 * DMA-API translation.
3288 *
3289 *****************************************************************************/
3290
3291int __init amd_iommu_init_passthrough(void)
3292{
3293	struct iommu_dev_data *dev_data;
3294	struct pci_dev *dev = NULL;
3295	struct amd_iommu *iommu;
3296	u16 devid;
3297	int ret;
3298
3299	ret = alloc_passthrough_domain();
3300	if (ret)
3301		return ret;
3302
3303	for_each_pci_dev(dev) {
3304		if (!check_device(&dev->dev))
3305			continue;
3306
3307		dev_data = get_dev_data(&dev->dev);
3308		dev_data->passthrough = true;
3309
3310		devid = get_device_id(&dev->dev);
3311
3312		iommu = amd_iommu_rlookup_table[devid];
3313		if (!iommu)
3314			continue;
3315
3316		attach_device(&dev->dev, pt_domain);
3317	}
3318
3319	amd_iommu_stats_init();
3320
3321	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
3322
3323	return 0;
3324}
3325
3326/* IOMMUv2 specific functions */
3327int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3328{
3329	return atomic_notifier_chain_register(&ppr_notifier, nb);
3330}
3331EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
3332
3333int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
3334{
3335	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
3336}
3337EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
3338
3339void amd_iommu_domain_direct_map(struct iommu_domain *dom)
3340{
3341	struct protection_domain *domain = dom->priv;
3342	unsigned long flags;
3343
3344	spin_lock_irqsave(&domain->lock, flags);
3345
3346	/* Update data structure */
3347	domain->mode    = PAGE_MODE_NONE;
3348	domain->updated = true;
3349
3350	/* Make changes visible to IOMMUs */
3351	update_domain(domain);
3352
3353	/* Page-table is not visible to IOMMU anymore, so free it */
3354	free_pagetable(domain);
3355
3356	spin_unlock_irqrestore(&domain->lock, flags);
3357}
3358EXPORT_SYMBOL(amd_iommu_domain_direct_map);
3359
3360int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
3361{
3362	struct protection_domain *domain = dom->priv;
3363	unsigned long flags;
3364	int levels, ret;
3365
3366	if (pasids <= 0 || pasids > (PASID_MASK + 1))
3367		return -EINVAL;
3368
3369	/* Number of GCR3 table levels required */
3370	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
3371		levels += 1;
3372
3373	if (levels > amd_iommu_max_glx_val)
3374		return -EINVAL;
3375
3376	spin_lock_irqsave(&domain->lock, flags);
3377
3378	/*
3379	 * Spare us the sanity checks of whether the devices already in
3380	 * the domain support IOMMUv2. Just force that the domain has no
3381	 * devices attached when it is switched into IOMMUv2 mode.
3382	 */
3383	ret = -EBUSY;
3384	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
3385		goto out;
3386
3387	ret = -ENOMEM;
3388	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
3389	if (domain->gcr3_tbl == NULL)
3390		goto out;
3391
3392	domain->glx      = levels;
3393	domain->flags   |= PD_IOMMUV2_MASK;
3394	domain->updated  = true;
3395
3396	update_domain(domain);
3397
3398	ret = 0;
3399
3400out:
3401	spin_unlock_irqrestore(&domain->lock, flags);
3402
3403	return ret;
3404}
3405EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
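
/*
 * Worked example for the level computation above: every GCR3 level
 * resolves 9 PASID bits (512 entries per table). pasids = 65536
 * leaves bits above bit 8 set in (pasids - 1), so the loop runs once
 * and yields levels = 1, i.e. a two-level table (one directory level
 * plus the leaf) good for up to 2^18 PASIDs.
 */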
3406
3407static int __flush_pasid(struct protection_domain *domain, int pasid,
3408			 u64 address, bool size)
3409{
3410	struct iommu_dev_data *dev_data;
3411	struct iommu_cmd cmd;
3412	int i, ret;
3413
3414	if (!(domain->flags & PD_IOMMUV2_MASK))
3415		return -EINVAL;
3416
3417	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
3418
3419	/*
3420	 * IOMMU TLB needs to be flushed before Device TLB to
3421	 * prevent device TLB refill from IOMMU TLB
3422	 */
3423	for (i = 0; i < amd_iommus_present; ++i) {
3424		if (domain->dev_iommu[i] == 0)
3425			continue;
3426
3427		ret = iommu_queue_command(amd_iommus[i], &cmd);
3428		if (ret != 0)
3429			goto out;
3430	}
3431
3432	/* Wait until IOMMU TLB flushes are complete */
3433	domain_flush_complete(domain);
3434
3435	/* Now flush device TLBs */
3436	list_for_each_entry(dev_data, &domain->dev_list, list) {
3437		struct amd_iommu *iommu;
3438		int qdep;
3439
3440		BUG_ON(!dev_data->ats.enabled);
3441
3442		qdep  = dev_data->ats.qdep;
3443		iommu = amd_iommu_rlookup_table[dev_data->devid];
3444
3445		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3446				      qdep, address, size);
3447
3448		ret = iommu_queue_command(iommu, &cmd);
3449		if (ret != 0)
3450			goto out;
3451	}
3452
3453	/* Wait until all device TLBs are flushed */
3454	domain_flush_complete(domain);
3455
3456	ret = 0;
3457
3458out:
3459
3460	return ret;
3461}
3462
3463static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
3464				  u64 address)
3465{
3466	INC_STATS_COUNTER(invalidate_iotlb);
3467
3468	return __flush_pasid(domain, pasid, address, false);
3469}
3470
3471int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
3472			 u64 address)
3473{
3474	struct protection_domain *domain = dom->priv;
3475	unsigned long flags;
3476	int ret;
3477
3478	spin_lock_irqsave(&domain->lock, flags);
3479	ret = __amd_iommu_flush_page(domain, pasid, address);
3480	spin_unlock_irqrestore(&domain->lock, flags);
3481
3482	return ret;
3483}
3484EXPORT_SYMBOL(amd_iommu_flush_page);
3485
3486static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
3487{
3488	INC_STATS_COUNTER(invalidate_iotlb_all);
3489
3490	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
3491			     true);
3492}
3493
3494int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
3495{
3496	struct protection_domain *domain = dom->priv;
3497	unsigned long flags;
3498	int ret;
3499
3500	spin_lock_irqsave(&domain->lock, flags);
3501	ret = __amd_iommu_flush_tlb(domain, pasid);
3502	spin_unlock_irqrestore(&domain->lock, flags);
3503
3504	return ret;
3505}
3506EXPORT_SYMBOL(amd_iommu_flush_tlb);
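
/*
 * Caller-side sketch (as an IOMMUv2 user reacting to address-space
 * invalidations might do; purely illustrative):
 *
 *	amd_iommu_flush_page(dom, pasid, address);	// one page
 *	amd_iommu_flush_tlb(dom, pasid);		// whole PASID
 */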
3507
3508static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
3509{
3510	int index;
3511	u64 *pte;
3512
3513	while (true) {
3514
3515		index = (pasid >> (9 * level)) & 0x1ff;
3516		pte   = &root[index];
3517
3518		if (level == 0)
3519			break;
3520
3521		if (!(*pte & GCR3_VALID)) {
3522			if (!alloc)
3523				return NULL;
3524
3525			root = (void *)get_zeroed_page(GFP_ATOMIC);
3526			if (root == NULL)
3527				return NULL;
3528
3529			*pte = __pa(root) | GCR3_VALID;
3530		}
3531
3532		root = __va(*pte & PAGE_MASK);
3533
3534		level -= 1;
3535	}
3536
3537	return pte;
3538}
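
/*
 * Index-extraction sketch for the walk above, for glx = 1 and
 * pasid = 0x12345:
 *
 *	level 1 index: (0x12345 >> 9) & 0x1ff = 0x091
 *	level 0 index:  0x12345       & 0x1ff = 0x145
 *
 * so the PASID's GCR3 entry sits in slot 0x145 of the leaf table
 * referenced from slot 0x091 of the root table.
 */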
3539
3540static int __set_gcr3(struct protection_domain *domain, int pasid,
3541		      unsigned long cr3)
3542{
3543	u64 *pte;
3544
3545	if (domain->mode != PAGE_MODE_NONE)
3546		return -EINVAL;
3547
3548	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
3549	if (pte == NULL)
3550		return -ENOMEM;
3551
3552	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;
3553
3554	return __amd_iommu_flush_tlb(domain, pasid);
3555}
3556
3557static int __clear_gcr3(struct protection_domain *domain, int pasid)
3558{
3559	u64 *pte;
3560
3561	if (domain->mode != PAGE_MODE_NONE)
3562		return -EINVAL;
3563
3564	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
3565	if (pte == NULL)
3566		return 0;
3567
3568	*pte = 0;
3569
3570	return __amd_iommu_flush_tlb(domain, pasid);
3571}
3572
3573int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
3574			      unsigned long cr3)
3575{
3576	struct protection_domain *domain = dom->priv;
3577	unsigned long flags;
3578	int ret;
3579
3580	spin_lock_irqsave(&domain->lock, flags);
3581	ret = __set_gcr3(domain, pasid, cr3);
3582	spin_unlock_irqrestore(&domain->lock, flags);
3583
3584	return ret;
3585}
3586EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
3587
3588int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
3589{
3590	struct protection_domain *domain = dom->priv;
3591	unsigned long flags;
3592	int ret;
3593
3594	spin_lock_irqsave(&domain->lock, flags);
3595	ret = __clear_gcr3(domain, pasid);
3596	spin_unlock_irqrestore(&domain->lock, flags);
3597
3598	return ret;
3599}
3600EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
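
/*
 * Bind/unbind sketch for the two calls above, as an IOMMUv2-capable
 * driver might issue them ('mm' is illustrative):
 *
 *	amd_iommu_domain_set_gcr3(dom, pasid, __pa(mm->pgd));
 *	...
 *	amd_iommu_domain_clear_gcr3(dom, pasid);
 */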
3601
3602int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
3603			   int status, int tag)
3604{
3605	struct iommu_dev_data *dev_data;
3606	struct amd_iommu *iommu;
3607	struct iommu_cmd cmd;
3608
3609	INC_STATS_COUNTER(complete_ppr);
3610
3611	dev_data = get_dev_data(&pdev->dev);
3612	iommu    = amd_iommu_rlookup_table[dev_data->devid];
3613
3614	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
3615			   tag, dev_data->pri_tlp);
3616
3617	return iommu_queue_command(iommu, &cmd);
3618}
3619EXPORT_SYMBOL(amd_iommu_complete_ppr);
3620
3621struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
3622{
3623	struct protection_domain *domain;
3624
3625	domain = get_domain(&pdev->dev);
3626	if (IS_ERR(domain))
3627		return NULL;
3628
3629	/* Only return IOMMUv2 domains */
3630	if (!(domain->flags & PD_IOMMUV2_MASK))
3631		return NULL;
3632
3633	return domain->iommu_domain;
3634}
3635EXPORT_SYMBOL(amd_iommu_get_v2_domain);
3636
3637void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
3638{
3639	struct iommu_dev_data *dev_data;
3640
3641	if (!amd_iommu_v2_supported())
3642		return;
3643
3644	dev_data = get_dev_data(&pdev->dev);
3645	dev_data->errata |= (1 << erratum);
3646}
3647EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
3648
3649int amd_iommu_device_info(struct pci_dev *pdev,
3650                          struct amd_iommu_device_info *info)
3651{
3652	int max_pasids;
3653	int pos;
3654
3655	if (pdev == NULL || info == NULL)
3656		return -EINVAL;
3657
3658	if (!amd_iommu_v2_supported())
3659		return -EINVAL;
3660
3661	memset(info, 0, sizeof(*info));
3662
3663	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3664	if (pos)
3665		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3666
3667	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3668	if (pos)
3669		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
3670
3671	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
3672	if (pos) {
3673		int features;
3674
3675		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
3676		max_pasids = min(max_pasids, (1 << 20));
3677
3678		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
3679		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
3680
3681		features = pci_pasid_features(pdev);
3682		if (features & PCI_PASID_CAP_EXEC)
3683			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
3684		if (features & PCI_PASID_CAP_PRIV)
3685			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
3686	}
3687
3688	return 0;
3689}
3690EXPORT_SYMBOL(amd_iommu_device_info);
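
/*
 * Caller-side sketch for amd_iommu_device_info() (the error handling
 * is illustrative):
 *
 *	struct amd_iommu_device_info info;
 *
 *	if (amd_iommu_device_info(pdev, &info))
 *		return -ENODEV;
 *	if (!(info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP))
 *		return -ENODEV;
 *	max_pasids = info.max_pasids;
 */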