   1/*
   2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
   3 * Author: Joerg Roedel <jroedel@suse.de>
   4 *         Leo Duran <leo.duran@amd.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  18 */
  19
  20#include <linux/pci.h>
  21#include <linux/acpi.h>
  22#include <linux/list.h>
  23#include <linux/bitmap.h>
  24#include <linux/slab.h>
  25#include <linux/syscore_ops.h>
  26#include <linux/interrupt.h>
  27#include <linux/msi.h>
  28#include <linux/amd-iommu.h>
  29#include <linux/export.h>
  30#include <linux/iommu.h>
  31#include <linux/kmemleak.h>
  32#include <asm/pci-direct.h>
  33#include <asm/iommu.h>
  34#include <asm/gart.h>
  35#include <asm/x86_init.h>
  36#include <asm/iommu_table.h>
  37#include <asm/io_apic.h>
  38#include <asm/irq_remapping.h>
  39
  40#include "amd_iommu_proto.h"
  41#include "amd_iommu_types.h"
  42#include "irq_remapping.h"
  43
  44/*
  45 * definitions for the ACPI scanning code
  46 */
  47#define IVRS_HEADER_LENGTH 48
  48
  49#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
  50#define ACPI_IVMD_TYPE_ALL              0x20
  51#define ACPI_IVMD_TYPE                  0x21
  52#define ACPI_IVMD_TYPE_RANGE            0x22
  53
  54#define IVHD_DEV_ALL                    0x01
  55#define IVHD_DEV_SELECT                 0x02
  56#define IVHD_DEV_SELECT_RANGE_START     0x03
  57#define IVHD_DEV_RANGE_END              0x04
  58#define IVHD_DEV_ALIAS                  0x42
  59#define IVHD_DEV_ALIAS_RANGE            0x43
  60#define IVHD_DEV_EXT_SELECT             0x46
  61#define IVHD_DEV_EXT_SELECT_RANGE       0x47
  62#define IVHD_DEV_SPECIAL		0x48
  63#define IVHD_DEV_ACPI_HID		0xf0
  64
  65#define UID_NOT_PRESENT                 0
  66#define UID_IS_INTEGER                  1
  67#define UID_IS_CHARACTER                2
  68
  69#define IVHD_SPECIAL_IOAPIC		1
  70#define IVHD_SPECIAL_HPET		2
  71
  72#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
  73#define IVHD_FLAG_PASSPW_EN_MASK        0x02
  74#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
  75#define IVHD_FLAG_ISOC_EN_MASK          0x08
  76
  77#define IVMD_FLAG_EXCL_RANGE            0x08
  78#define IVMD_FLAG_UNITY_MAP             0x01
  79
  80#define ACPI_DEVFLAG_INITPASS           0x01
  81#define ACPI_DEVFLAG_EXTINT             0x02
  82#define ACPI_DEVFLAG_NMI                0x04
  83#define ACPI_DEVFLAG_SYSMGT1            0x10
  84#define ACPI_DEVFLAG_SYSMGT2            0x20
  85#define ACPI_DEVFLAG_LINT0              0x40
  86#define ACPI_DEVFLAG_LINT1              0x80
  87#define ACPI_DEVFLAG_ATSDIS             0x10000000
  88
  89#define LOOP_TIMEOUT	100000
  90/*
  91 * ACPI table definitions
  92 *
  93 * These data structures are laid over the table to parse the important values
  94 * out of it.
  95 */
  96
  97/*
  98 * structure describing one IOMMU in the ACPI table. Typically followed by one
   99 * or more ivhd_entry structures.
 100 */
 101struct ivhd_header {
 102	u8 type;
 103	u8 flags;
 104	u16 length;
 105	u16 devid;
 106	u16 cap_ptr;
 107	u64 mmio_phys;
 108	u16 pci_seg;
 109	u16 info;
 110	u32 efr_attr;
 111
 112	/* Following only valid on IVHD type 11h and 40h */
 113	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
 114	u64 res;
 115} __attribute__((packed));
 116
 117/*
 118 * A device entry describing which devices a specific IOMMU translates and
 119 * which requestor ids they use.
 120 */
 121struct ivhd_entry {
 122	u8 type;
 123	u16 devid;
 124	u8 flags;
 125	u32 ext;
 126	u32 hidh;
 127	u64 cid;
 128	u8 uidf;
 129	u8 uidl;
 130	u8 uid;
 131} __attribute__((packed));
 132
 133/*
 134 * An AMD IOMMU memory definition structure. It defines things like exclusion
 135 * ranges for devices and regions that should be unity mapped.
 136 */
 137struct ivmd_header {
 138	u8 type;
 139	u8 flags;
 140	u16 length;
 141	u16 devid;
 142	u16 aux;
 143	u64 resv;
 144	u64 range_start;
 145	u64 range_length;
 146} __attribute__((packed));
 147
 148bool amd_iommu_dump;
 149bool amd_iommu_irq_remap __read_mostly;
 150
 151int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
 152
 153static bool amd_iommu_detected;
 154static bool __initdata amd_iommu_disabled;
 155static int amd_iommu_target_ivhd_type;
 156
 157u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 158					   to handle */
 159LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 160					   we find in ACPI */
 161bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 162
 163LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
 164					   system */
 165
  166/* Array to assign indices to IOMMUs */
 167struct amd_iommu *amd_iommus[MAX_IOMMUS];
 168int amd_iommus_present;
 169
 170/* IOMMUs have a non-present cache? */
 171bool amd_iommu_np_cache __read_mostly;
 172bool amd_iommu_iotlb_sup __read_mostly = true;
 173
 174u32 amd_iommu_max_pasid __read_mostly = ~0;
 175
 176bool amd_iommu_v2_present __read_mostly;
 177static bool amd_iommu_pc_present __read_mostly;
 178
 179bool amd_iommu_force_isolation __read_mostly;
 180
 181/*
 182 * List of protection domains - used during resume
 183 */
 184LIST_HEAD(amd_iommu_pd_list);
 185spinlock_t amd_iommu_pd_lock;
 186
 187/*
 188 * Pointer to the device table which is shared by all AMD IOMMUs
 189 * it is indexed by the PCI device id or the HT unit id and contains
 190 * information about the domain the device belongs to as well as the
 191 * page table root pointer.
 192 */
 193struct dev_table_entry *amd_iommu_dev_table;
 194
 195/*
 196 * The alias table is a driver specific data structure which contains the
 197 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 198 * More than one device can share the same requestor id.
 199 */
 200u16 *amd_iommu_alias_table;
 201
 202/*
 203 * The rlookup table is used to find the IOMMU which is responsible
 204 * for a specific device. It is also indexed by the PCI device id.
 205 */
 206struct amd_iommu **amd_iommu_rlookup_table;
 207
 208/*
 209 * This table is used to find the irq remapping table for a given device id
 210 * quickly.
 211 */
 212struct irq_remap_table **irq_lookup_table;
 213
 214/*
 215 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 216 * to know which ones are already in use.
 217 */
 218unsigned long *amd_iommu_pd_alloc_bitmap;
 219
 220static u32 dev_table_size;	/* size of the device table */
 221static u32 alias_table_size;	/* size of the alias table */
  222static u32 rlookup_table_size;	/* size of the rlookup table */
 223
 224enum iommu_init_state {
 225	IOMMU_START_STATE,
 226	IOMMU_IVRS_DETECTED,
 227	IOMMU_ACPI_FINISHED,
 228	IOMMU_ENABLED,
 229	IOMMU_PCI_INIT,
 230	IOMMU_INTERRUPTS_EN,
 231	IOMMU_DMA_OPS,
 232	IOMMU_INITIALIZED,
 233	IOMMU_NOT_FOUND,
 234	IOMMU_INIT_ERROR,
 235};
 236
 237/* Early ioapic and hpet maps from kernel command line */
 238#define EARLY_MAP_SIZE		4
 239static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
 240static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
 241static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
 242
 243static int __initdata early_ioapic_map_size;
 244static int __initdata early_hpet_map_size;
 245static int __initdata early_acpihid_map_size;
 246
 247static bool __initdata cmdline_maps;
 248
 249static enum iommu_init_state init_state = IOMMU_START_STATE;
 250
 251static int amd_iommu_enable_interrupts(void);
 252static int __init iommu_go_to_state(enum iommu_init_state state);
 253static void init_device_table_dma(void);
 254
 255static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
 256				    u8 bank, u8 cntr, u8 fxn,
 257				    u64 *value, bool is_write);
 258
 259static inline void update_last_devid(u16 devid)
 260{
 261	if (devid > amd_iommu_last_bdf)
 262		amd_iommu_last_bdf = devid;
 263}
 264
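     /*
      * The device/alias/rlookup tables hold one entry per device id up to
      * amd_iommu_last_bdf; tbl_size() rounds that up to a power-of-two
      * number of pages (e.g. 32-byte entries and a last bdf of 0xffff
      * yield a 2MB device table).
      */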
 265static inline unsigned long tbl_size(int entry_size)
 266{
 267	unsigned shift = PAGE_SHIFT +
 268			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
 269
 270	return 1UL << shift;
 271}
 272
 273/* Access to l1 and l2 indexed register spaces */
 274
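     /*
      * Both spaces are reached through an address/data window in PCI config
      * space: 0xf8/0xfc for L1 and 0xf0/0xf4 for L2. Setting the write-enable
      * bit in the address word (bit 31 for L1, bit 8 for L2) turns the
      * following data access into a write; the L1 bank number goes into the
      * bits above the register address.
      */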
 275static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
 276{
 277	u32 val;
 278
 279	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
 280	pci_read_config_dword(iommu->dev, 0xfc, &val);
 281	return val;
 282}
 283
 284static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
 285{
 286	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
 287	pci_write_config_dword(iommu->dev, 0xfc, val);
 288	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
 289}
 290
 291static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
 292{
 293	u32 val;
 294
 295	pci_write_config_dword(iommu->dev, 0xf0, address);
 296	pci_read_config_dword(iommu->dev, 0xf4, &val);
 297	return val;
 298}
 299
 300static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
 301{
 302	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
 303	pci_write_config_dword(iommu->dev, 0xf4, val);
 304}
 305
 306/****************************************************************************
 307 *
 308 * AMD IOMMU MMIO register space handling functions
 309 *
 310 * These functions are used to program the IOMMU device registers in
 311 * MMIO space required for that driver.
 312 *
 313 ****************************************************************************/
 314
 315/*
  316 * This function sets the exclusion range in the IOMMU. DMA accesses to the
  317 * exclusion range are passed through untranslated.
 318 */
 319static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 320{
 321	u64 start = iommu->exclusion_start & PAGE_MASK;
 322	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
 323	u64 entry;
 324
 325	if (!iommu->exclusion_start)
 326		return;
 327
 328	entry = start | MMIO_EXCL_ENABLE_MASK;
 329	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
 330			&entry, sizeof(entry));
 331
 332	entry = limit;
 333	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
 334			&entry, sizeof(entry));
 335}
 336
 337/* Programs the physical address of the device table into the IOMMU hardware */
 338static void iommu_set_device_table(struct amd_iommu *iommu)
 339{
 340	u64 entry;
 341
 342	BUG_ON(iommu->mmio_base == NULL);
 343
 344	entry = virt_to_phys(amd_iommu_dev_table);
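     	/* The size field holds the number of 4K pages in the table, minus one */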
 345	entry |= (dev_table_size >> 12) - 1;
 346	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
 347			&entry, sizeof(entry));
 348}
 349
 350/* Generic functions to enable/disable certain features of the IOMMU. */
 351static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 352{
 353	u32 ctrl;
 354
 355	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 356	ctrl |= (1 << bit);
 357	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 358}
 359
 360static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 361{
 362	u32 ctrl;
 363
 364	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 365	ctrl &= ~(1 << bit);
 366	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 367}
 368
 369static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
 370{
 371	u32 ctrl;
 372
 373	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 374	ctrl &= ~CTRL_INV_TO_MASK;
 375	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
 376	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 377}
 378
 379/* Function to enable the hardware */
 380static void iommu_enable(struct amd_iommu *iommu)
 381{
 382	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 383}
 384
 385static void iommu_disable(struct amd_iommu *iommu)
 386{
 387	/* Disable command buffer */
 388	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
 389
 390	/* Disable event logging and event interrupts */
 391	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
 392	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
 393
 394	/* Disable IOMMU GA_LOG */
 395	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
 396	iommu_feature_disable(iommu, CONTROL_GAINT_EN);
 397
 398	/* Disable IOMMU hardware itself */
 399	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
 400}
 401
 402/*
 403 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 404 * the system has one.
 405 */
 406static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
 407{
 408	if (!request_mem_region(address, end, "amd_iommu")) {
 409		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
 410			address, end);
 411		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
 412		return NULL;
 413	}
 414
 415	return (u8 __iomem *)ioremap_nocache(address, end);
 416}
 417
 418static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
 419{
 420	if (iommu->mmio_base)
 421		iounmap(iommu->mmio_base);
 422	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
 423}
 424
 425static inline u32 get_ivhd_header_size(struct ivhd_header *h)
 426{
 427	u32 size = 0;
 428
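     	/*
     	 * Type 10h IVHDs use the legacy 24-byte header; types 11h and 40h
     	 * use the extended 40-byte header that carries a copy of the EFR.
     	 */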
 429	switch (h->type) {
 430	case 0x10:
 431		size = 24;
 432		break;
 433	case 0x11:
 434	case 0x40:
 435		size = 40;
 436		break;
 437	}
 438	return size;
 439}
 440
 441/****************************************************************************
 442 *
 443 * The functions below belong to the first pass of AMD IOMMU ACPI table
 444 * parsing. In this pass we try to find out the highest device id this
 445 * code has to handle. Upon this information the size of the shared data
 446 * structures is determined later.
 447 *
 448 ****************************************************************************/
 449
 450/*
 451 * This function calculates the length of a given IVHD entry
 452 */
 453static inline int ivhd_entry_length(u8 *ivhd)
 454{
 455	u32 type = ((struct ivhd_entry *)ivhd)->type;
 456
 457	if (type < 0x80) {
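     		/*
     		 * Fixed-length entries encode their size in the two topmost
     		 * bits of the type field: types below 0x40 are 4 bytes long,
     		 * types 0x40-0x7f are 8 bytes long.
     		 */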
 458		return 0x04 << (*ivhd >> 6);
 459	} else if (type == IVHD_DEV_ACPI_HID) {
 460		/* For ACPI_HID, offset 21 is uid len */
 461		return *((u8 *)ivhd + 21) + 22;
 462	}
 463	return 0;
 464}
 465
 466/*
 467 * After reading the highest device id from the IOMMU PCI capability header
  468 * this function checks whether a higher device id is defined in the ACPI table
 469 */
 470static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
 471{
 472	u8 *p = (void *)h, *end = (void *)h;
 473	struct ivhd_entry *dev;
 474
 475	u32 ivhd_size = get_ivhd_header_size(h);
 476
 477	if (!ivhd_size) {
 478		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
 479		return -EINVAL;
 480	}
 481
 482	p += ivhd_size;
 483	end += h->length;
 484
 485	while (p < end) {
 486		dev = (struct ivhd_entry *)p;
 487		switch (dev->type) {
 488		case IVHD_DEV_ALL:
 489			/* Use maximum BDF value for DEV_ALL */
 490			update_last_devid(0xffff);
 491			break;
 492		case IVHD_DEV_SELECT:
 493		case IVHD_DEV_RANGE_END:
 494		case IVHD_DEV_ALIAS:
 495		case IVHD_DEV_EXT_SELECT:
 496			/* all the above subfield types refer to device ids */
 497			update_last_devid(dev->devid);
 498			break;
 499		default:
 500			break;
 501		}
 502		p += ivhd_entry_length(p);
 503	}
 504
 505	WARN_ON(p != end);
 506
 507	return 0;
 508}
 509
 510static int __init check_ivrs_checksum(struct acpi_table_header *table)
 511{
 512	int i;
 513	u8 checksum = 0, *p = (u8 *)table;
 514
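     	/* Standard ACPI rule: all bytes of the table must sum to zero (mod 256) */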
 515	for (i = 0; i < table->length; ++i)
 516		checksum += p[i];
 517	if (checksum != 0) {
 518		/* ACPI table corrupt */
 519		pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
 520		return -ENODEV;
 521	}
 522
 523	return 0;
 524}
 525
 526/*
 527 * Iterate over all IVHD entries in the ACPI table and find the highest device
 528 * id which we need to handle. This is the first of three functions which parse
 529 * the ACPI table. So we check the checksum here.
 530 */
 531static int __init find_last_devid_acpi(struct acpi_table_header *table)
 532{
 533	u8 *p = (u8 *)table, *end = (u8 *)table;
 534	struct ivhd_header *h;
 535
 536	p += IVRS_HEADER_LENGTH;
 537
 538	end += table->length;
 539	while (p < end) {
 540		h = (struct ivhd_header *)p;
 541		if (h->type == amd_iommu_target_ivhd_type) {
 542			int ret = find_last_devid_from_ivhd(h);
 543
 544			if (ret)
 545				return ret;
 546		}
 547		p += h->length;
 548	}
 549	WARN_ON(p != end);
 550
 551	return 0;
 552}
 553
 554/****************************************************************************
 555 *
 556 * The following functions belong to the code path which parses the ACPI table
 557 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 558 * data structures, initialize the device/alias/rlookup table and also
 559 * basically initialize the hardware.
 560 *
 561 ****************************************************************************/
 562
 563/*
 564 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 565 * write commands to that buffer later and the IOMMU will execute them
 566 * asynchronously
 567 */
 568static int __init alloc_command_buffer(struct amd_iommu *iommu)
 569{
 570	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 571						  get_order(CMD_BUFFER_SIZE));
 572
 573	return iommu->cmd_buf ? 0 : -ENOMEM;
 574}
 575
 576/*
 577 * This function resets the command buffer if the IOMMU stopped fetching
 578 * commands from it.
 579 */
 580void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
 581{
 582	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
 583
 584	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 585	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 586
 587	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
 588}
 589
 590/*
 591 * This function writes the command buffer address to the hardware and
 592 * enables it.
 593 */
 594static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 595{
 596	u64 entry;
 597
 598	BUG_ON(iommu->cmd_buf == NULL);
 599
 600	entry = (u64)virt_to_phys(iommu->cmd_buf);
 601	entry |= MMIO_CMD_SIZE_512;
 602
 603	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 604		    &entry, sizeof(entry));
 605
 606	amd_iommu_reset_cmd_buffer(iommu);
 607}
 608
 609static void __init free_command_buffer(struct amd_iommu *iommu)
 610{
 611	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 612}
 613
 614/* allocates the memory where the IOMMU will log its events to */
 615static int __init alloc_event_buffer(struct amd_iommu *iommu)
 616{
 617	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 618						  get_order(EVT_BUFFER_SIZE));
 619
 620	return iommu->evt_buf ? 0 : -ENOMEM;
 621}
 622
 623static void iommu_enable_event_buffer(struct amd_iommu *iommu)
 624{
 625	u64 entry;
 626
 627	BUG_ON(iommu->evt_buf == NULL);
 628
 629	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
 630
 631	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
 632		    &entry, sizeof(entry));
 633
 634	/* set head and tail to zero manually */
 635	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 636	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 637
 638	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
 639}
 640
 641static void __init free_event_buffer(struct amd_iommu *iommu)
 642{
 643	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 644}
 645
  646/* allocates the memory where the IOMMU will log peripheral page requests to */
 647static int __init alloc_ppr_log(struct amd_iommu *iommu)
 648{
 649	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 650						  get_order(PPR_LOG_SIZE));
 651
 652	return iommu->ppr_log ? 0 : -ENOMEM;
 653}
 654
 655static void iommu_enable_ppr_log(struct amd_iommu *iommu)
 656{
 657	u64 entry;
 658
 659	if (iommu->ppr_log == NULL)
 660		return;
 661
 662	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
 663
 664	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
 665		    &entry, sizeof(entry));
 666
 667	/* set head and tail to zero manually */
 668	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 669	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 670
 671	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
 672	iommu_feature_enable(iommu, CONTROL_PPR_EN);
 673}
 674
 675static void __init free_ppr_log(struct amd_iommu *iommu)
 676{
 677	if (iommu->ppr_log == NULL)
 678		return;
 679
 680	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
 681}
 682
 683static void free_ga_log(struct amd_iommu *iommu)
 684{
 685#ifdef CONFIG_IRQ_REMAP
 686	if (iommu->ga_log)
 687		free_pages((unsigned long)iommu->ga_log,
 688			    get_order(GA_LOG_SIZE));
 689	if (iommu->ga_log_tail)
 690		free_pages((unsigned long)iommu->ga_log_tail,
 691			    get_order(8));
 692#endif
 693}
 694
 695static int iommu_ga_log_enable(struct amd_iommu *iommu)
 696{
 697#ifdef CONFIG_IRQ_REMAP
 698	u32 status, i;
 699
 700	if (!iommu->ga_log)
 701		return -EINVAL;
 702
 703	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 704
 705	/* Check if already running */
 706	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
 707		return 0;
 708
 709	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
 710	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
 711
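     	/* Poll until the hardware reports the GA log as running */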
 712	for (i = 0; i < LOOP_TIMEOUT; ++i) {
 713		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 714		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
 715			break;
 716	}
 717
 718	if (i >= LOOP_TIMEOUT)
 719		return -EINVAL;
 720#endif /* CONFIG_IRQ_REMAP */
 721	return 0;
 722}
 723
 724#ifdef CONFIG_IRQ_REMAP
 725static int iommu_init_ga_log(struct amd_iommu *iommu)
 726{
 727	u64 entry;
 728
 729	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 730		return 0;
 731
 732	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 733					get_order(GA_LOG_SIZE));
 734	if (!iommu->ga_log)
 735		goto err_out;
 736
 737	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 738					get_order(8));
 739	if (!iommu->ga_log_tail)
 740		goto err_out;
 741
 742	entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
 743	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
 744		    &entry, sizeof(entry));
 745	entry = ((u64)virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
 746	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
 747		    &entry, sizeof(entry));
 748	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
 749	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
 750
 751	return 0;
 752err_out:
 753	free_ga_log(iommu);
 754	return -EINVAL;
 755}
 756#endif /* CONFIG_IRQ_REMAP */
 757
 758static int iommu_init_ga(struct amd_iommu *iommu)
 759{
 760	int ret = 0;
 761
 762#ifdef CONFIG_IRQ_REMAP
 763	/* Note: We have already checked GASup from IVRS table.
 764	 *       Now, we need to make sure that GAMSup is set.
 765	 */
 766	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
 767	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
 768		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
 769
 770	ret = iommu_init_ga_log(iommu);
 771#endif /* CONFIG_IRQ_REMAP */
 772
 773	return ret;
 774}
 775
 776static void iommu_enable_gt(struct amd_iommu *iommu)
 777{
 778	if (!iommu_feature(iommu, FEATURE_GT))
 779		return;
 780
 781	iommu_feature_enable(iommu, CONTROL_GT_EN);
 782}
 783
 784/* sets a specific bit in the device table entry. */
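     /*
      * A device table entry is 256 bits wide: bit >> 6 selects one of its
      * four 64-bit words, the low six bits select the position within that
      * word.
      */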
 785static void set_dev_entry_bit(u16 devid, u8 bit)
 786{
 787	int i = (bit >> 6) & 0x03;
 788	int _bit = bit & 0x3f;
 789
 790	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
 791}
 792
 793static int get_dev_entry_bit(u16 devid, u8 bit)
 794{
 795	int i = (bit >> 6) & 0x03;
 796	int _bit = bit & 0x3f;
 797
 798	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
 799}
 800
 801
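     /*
      * Erratum 63 workaround: a DTE whose SysMgt field reads back as 01b
      * also needs the IW (write permission) bit set.
      */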
 802void amd_iommu_apply_erratum_63(u16 devid)
 803{
 804	int sysmgt;
 805
 806	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
 807		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
 808
 809	if (sysmgt == 0x01)
 810		set_dev_entry_bit(devid, DEV_ENTRY_IW);
 811}
 812
 813/* Writes the specific IOMMU for a device into the rlookup table */
 814static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
 815{
 816	amd_iommu_rlookup_table[devid] = iommu;
 817}
 818
 819/*
 820 * This function takes the device specific flags read from the ACPI
 821 * table and sets up the device table entry with that information
 822 */
 823static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
 824					   u16 devid, u32 flags, u32 ext_flags)
 825{
 826	if (flags & ACPI_DEVFLAG_INITPASS)
 827		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
 828	if (flags & ACPI_DEVFLAG_EXTINT)
 829		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
 830	if (flags & ACPI_DEVFLAG_NMI)
 831		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
 832	if (flags & ACPI_DEVFLAG_SYSMGT1)
 833		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
 834	if (flags & ACPI_DEVFLAG_SYSMGT2)
 835		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
 836	if (flags & ACPI_DEVFLAG_LINT0)
 837		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
 838	if (flags & ACPI_DEVFLAG_LINT1)
 839		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
 840
 841	amd_iommu_apply_erratum_63(devid);
 842
 843	set_iommu_for_device(iommu, devid);
 844}
 845
 846static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
 847{
 848	struct devid_map *entry;
 849	struct list_head *list;
 850
 851	if (type == IVHD_SPECIAL_IOAPIC)
 852		list = &ioapic_map;
 853	else if (type == IVHD_SPECIAL_HPET)
 854		list = &hpet_map;
 855	else
 856		return -EINVAL;
 857
 858	list_for_each_entry(entry, list, list) {
 859		if (!(entry->id == id && entry->cmd_line))
 860			continue;
 861
 862		pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
 863			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
 864
 865		*devid = entry->devid;
 866
 867		return 0;
 868	}
 869
 870	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 871	if (!entry)
 872		return -ENOMEM;
 873
 874	entry->id	= id;
 875	entry->devid	= *devid;
 876	entry->cmd_line	= cmd_line;
 877
 878	list_add_tail(&entry->list, list);
 879
 880	return 0;
 881}
 882
 883static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
 884				      bool cmd_line)
 885{
 886	struct acpihid_map_entry *entry;
 887	struct list_head *list = &acpihid_map;
 888
 889	list_for_each_entry(entry, list, list) {
 890		if (strcmp(entry->hid, hid) ||
 891		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
 892		    !entry->cmd_line)
 893			continue;
 894
 895		pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
 896			hid, uid);
 897		*devid = entry->devid;
 898		return 0;
 899	}
 900
 901	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 902	if (!entry)
 903		return -ENOMEM;
 904
 905	memcpy(entry->uid, uid, strlen(uid));
 906	memcpy(entry->hid, hid, strlen(hid));
 907	entry->devid = *devid;
 908	entry->cmd_line	= cmd_line;
 909	entry->root_devid = (entry->devid & (~0x7));
 910
 911	pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
 912		entry->cmd_line ? "cmd" : "ivrs",
 913		entry->hid, entry->uid, entry->root_devid);
 914
 915	list_add_tail(&entry->list, list);
 916	return 0;
 917}
 918
 919static int __init add_early_maps(void)
 920{
 921	int i, ret;
 922
 923	for (i = 0; i < early_ioapic_map_size; ++i) {
 924		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
 925					 early_ioapic_map[i].id,
 926					 &early_ioapic_map[i].devid,
 927					 early_ioapic_map[i].cmd_line);
 928		if (ret)
 929			return ret;
 930	}
 931
 932	for (i = 0; i < early_hpet_map_size; ++i) {
 933		ret = add_special_device(IVHD_SPECIAL_HPET,
 934					 early_hpet_map[i].id,
 935					 &early_hpet_map[i].devid,
 936					 early_hpet_map[i].cmd_line);
 937		if (ret)
 938			return ret;
 939	}
 940
 941	for (i = 0; i < early_acpihid_map_size; ++i) {
 942		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
 943					  early_acpihid_map[i].uid,
 944					  &early_acpihid_map[i].devid,
 945					  early_acpihid_map[i].cmd_line);
 946		if (ret)
 947			return ret;
 948	}
 949
 950	return 0;
 951}
 952
 953/*
 954 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 955 * it
 956 */
 957static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 958{
 959	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 960
 961	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
 962		return;
 963
 964	if (iommu) {
 965		/*
  966		 * We can only configure exclusion ranges per IOMMU, not
  967		 * per device. But we can enable the exclusion range per
  968		 * device. This is done here.
 969		 */
 970		set_dev_entry_bit(devid, DEV_ENTRY_EX);
 971		iommu->exclusion_start = m->range_start;
 972		iommu->exclusion_length = m->range_length;
 973	}
 974}
 975
 976/*
 977 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 978 * initializes the hardware and our data structures with it.
 979 */
 980static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 981					struct ivhd_header *h)
 982{
 983	u8 *p = (u8 *)h;
 984	u8 *end = p, flags = 0;
 985	u16 devid = 0, devid_start = 0, devid_to = 0;
 986	u32 dev_i, ext_flags = 0;
 987	bool alias = false;
 988	struct ivhd_entry *e;
 989	u32 ivhd_size;
 990	int ret;
 991
 992
 993	ret = add_early_maps();
 994	if (ret)
 995		return ret;
 996
 997	/*
 998	 * First save the recommended feature enable bits from ACPI
 999	 */
1000	iommu->acpi_flags = h->flags;
1001
1002	/*
1003	 * Done. Now parse the device entries
1004	 */
1005	ivhd_size = get_ivhd_header_size(h);
1006	if (!ivhd_size) {
1007		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
1008		return -EINVAL;
1009	}
1010
1011	p += ivhd_size;
1012
1013	end += h->length;
1014
1015
1016	while (p < end) {
1017		e = (struct ivhd_entry *)p;
1018		switch (e->type) {
1019		case IVHD_DEV_ALL:
1020
1021			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);
1022
1023			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
1024				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1025			break;
1026		case IVHD_DEV_SELECT:
1027
1028			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1029				    "flags: %02x\n",
1030				    PCI_BUS_NUM(e->devid),
1031				    PCI_SLOT(e->devid),
1032				    PCI_FUNC(e->devid),
1033				    e->flags);
1034
1035			devid = e->devid;
1036			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1037			break;
1038		case IVHD_DEV_SELECT_RANGE_START:
1039
1040			DUMP_printk("  DEV_SELECT_RANGE_START\t "
1041				    "devid: %02x:%02x.%x flags: %02x\n",
1042				    PCI_BUS_NUM(e->devid),
1043				    PCI_SLOT(e->devid),
1044				    PCI_FUNC(e->devid),
1045				    e->flags);
1046
1047			devid_start = e->devid;
1048			flags = e->flags;
1049			ext_flags = 0;
1050			alias = false;
1051			break;
1052		case IVHD_DEV_ALIAS:
1053
1054			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1055				    "flags: %02x devid_to: %02x:%02x.%x\n",
1056				    PCI_BUS_NUM(e->devid),
1057				    PCI_SLOT(e->devid),
1058				    PCI_FUNC(e->devid),
1059				    e->flags,
1060				    PCI_BUS_NUM(e->ext >> 8),
1061				    PCI_SLOT(e->ext >> 8),
1062				    PCI_FUNC(e->ext >> 8));
1063
1064			devid = e->devid;
1065			devid_to = e->ext >> 8;
1066			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
1067			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1068			amd_iommu_alias_table[devid] = devid_to;
1069			break;
1070		case IVHD_DEV_ALIAS_RANGE:
1071
1072			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
1073				    "devid: %02x:%02x.%x flags: %02x "
1074				    "devid_to: %02x:%02x.%x\n",
1075				    PCI_BUS_NUM(e->devid),
1076				    PCI_SLOT(e->devid),
1077				    PCI_FUNC(e->devid),
1078				    e->flags,
1079				    PCI_BUS_NUM(e->ext >> 8),
1080				    PCI_SLOT(e->ext >> 8),
1081				    PCI_FUNC(e->ext >> 8));
1082
1083			devid_start = e->devid;
1084			flags = e->flags;
1085			devid_to = e->ext >> 8;
1086			ext_flags = 0;
1087			alias = true;
1088			break;
1089		case IVHD_DEV_EXT_SELECT:
1090
1091			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1092				    "flags: %02x ext: %08x\n",
1093				    PCI_BUS_NUM(e->devid),
1094				    PCI_SLOT(e->devid),
1095				    PCI_FUNC(e->devid),
1096				    e->flags, e->ext);
1097
1098			devid = e->devid;
1099			set_dev_entry_from_acpi(iommu, devid, e->flags,
1100						e->ext);
1101			break;
1102		case IVHD_DEV_EXT_SELECT_RANGE:
1103
1104			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
1105				    "%02x:%02x.%x flags: %02x ext: %08x\n",
1106				    PCI_BUS_NUM(e->devid),
1107				    PCI_SLOT(e->devid),
1108				    PCI_FUNC(e->devid),
1109				    e->flags, e->ext);
1110
1111			devid_start = e->devid;
1112			flags = e->flags;
1113			ext_flags = e->ext;
1114			alias = false;
1115			break;
1116		case IVHD_DEV_RANGE_END:
1117
1118			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
1119				    PCI_BUS_NUM(e->devid),
1120				    PCI_SLOT(e->devid),
1121				    PCI_FUNC(e->devid));
1122
1123			devid = e->devid;
1124			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1125				if (alias) {
1126					amd_iommu_alias_table[dev_i] = devid_to;
1127					set_dev_entry_from_acpi(iommu,
1128						devid_to, flags, ext_flags);
1129				}
1130				set_dev_entry_from_acpi(iommu, dev_i,
1131							flags, ext_flags);
1132			}
1133			break;
1134		case IVHD_DEV_SPECIAL: {
1135			u8 handle, type;
1136			const char *var;
1137			u16 devid;
1138			int ret;
1139
1140			handle = e->ext & 0xff;
1141			devid  = (e->ext >>  8) & 0xffff;
1142			type   = (e->ext >> 24) & 0xff;
1143
1144			if (type == IVHD_SPECIAL_IOAPIC)
1145				var = "IOAPIC";
1146			else if (type == IVHD_SPECIAL_HPET)
1147				var = "HPET";
1148			else
1149				var = "UNKNOWN";
1150
1151			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
1152				    var, (int)handle,
1153				    PCI_BUS_NUM(devid),
1154				    PCI_SLOT(devid),
1155				    PCI_FUNC(devid));
1156
1157			ret = add_special_device(type, handle, &devid, false);
1158			if (ret)
1159				return ret;
1160
1161			/*
1162			 * add_special_device might update the devid in case a
1163			 * command-line override is present. So call
1164			 * set_dev_entry_from_acpi after add_special_device.
1165			 */
1166			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1167
1168			break;
1169		}
1170		case IVHD_DEV_ACPI_HID: {
1171			u16 devid;
1172			u8 hid[ACPIHID_HID_LEN] = {0};
1173			u8 uid[ACPIHID_UID_LEN] = {0};
1174			int ret;
1175
1176			if (h->type != 0x40) {
1177				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1178				       e->type);
1179				break;
1180			}
1181
1182			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
1183			hid[ACPIHID_HID_LEN - 1] = '\0';
1184
1185			if (!(*hid)) {
1186				pr_err(FW_BUG "Invalid HID.\n");
1187				break;
1188			}
1189
1190			switch (e->uidf) {
1191			case UID_NOT_PRESENT:
1192
1193				if (e->uidl != 0)
1194					pr_warn(FW_BUG "Invalid UID length.\n");
1195
1196				break;
1197			case UID_IS_INTEGER:
1198
1199				sprintf(uid, "%d", e->uid);
1200
1201				break;
1202			case UID_IS_CHARACTER:
1203
1204				memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
1205				uid[ACPIHID_UID_LEN - 1] = '\0';
1206
1207				break;
1208			default:
1209				break;
1210			}
1211
1212			devid = e->devid;
1213			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1214				    hid, uid,
1215				    PCI_BUS_NUM(devid),
1216				    PCI_SLOT(devid),
1217				    PCI_FUNC(devid));
1218
1219			flags = e->flags;
1220
1221			ret = add_acpi_hid_device(hid, uid, &devid, false);
1222			if (ret)
1223				return ret;
1224
1225			/*
 1226			 * add_acpi_hid_device might update the devid in case a
 1227			 * command-line override is present. So call
 1228			 * set_dev_entry_from_acpi after add_acpi_hid_device.
1229			 */
1230			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1231
1232			break;
1233		}
1234		default:
1235			break;
1236		}
1237
1238		p += ivhd_entry_length(p);
1239	}
1240
1241	return 0;
1242}
1243
1244static void __init free_iommu_one(struct amd_iommu *iommu)
1245{
1246	free_command_buffer(iommu);
1247	free_event_buffer(iommu);
1248	free_ppr_log(iommu);
1249	free_ga_log(iommu);
1250	iommu_unmap_mmio_space(iommu);
1251}
1252
1253static void __init free_iommu_all(void)
1254{
1255	struct amd_iommu *iommu, *next;
1256
1257	for_each_iommu_safe(iommu, next) {
1258		list_del(&iommu->list);
1259		free_iommu_one(iommu);
1260		kfree(iommu);
1261	}
1262}
1263
1264/*
1265 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1266 * Workaround:
 1267 *     BIOS should disable L2B miscellaneous clock gating by setting
1268 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1269 */
1270static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1271{
1272	u32 value;
1273
1274	if ((boot_cpu_data.x86 != 0x15) ||
1275	    (boot_cpu_data.x86_model < 0x10) ||
1276	    (boot_cpu_data.x86_model > 0x1f))
1277		return;
1278
1279	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1280	pci_read_config_dword(iommu->dev, 0xf4, &value);
1281
1282	if (value & BIT(2))
1283		return;
1284
1285	/* Select NB indirect register 0x90 and enable writing */
1286	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1287
1288	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1289	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
1290		dev_name(&iommu->dev->dev));
1291
1292	/* Clear the enable writing bit */
1293	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1294}
1295
1296/*
1297 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1298 * Workaround:
1299 *     BIOS should enable ATS write permission check by setting
1300 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1301 */
1302static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1303{
1304	u32 value;
1305
1306	if ((boot_cpu_data.x86 != 0x15) ||
1307	    (boot_cpu_data.x86_model < 0x30) ||
1308	    (boot_cpu_data.x86_model > 0x3f))
1309		return;
1310
1311	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1312	value = iommu_read_l2(iommu, 0x47);
1313
1314	if (value & BIT(0))
1315		return;
1316
1317	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1318	iommu_write_l2(iommu, 0x47, value | BIT(0));
1319
1320	pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
1321		dev_name(&iommu->dev->dev));
1322}
1323
1324/*
 1325 * This function glues the initialization of one IOMMU
1326 * together and also allocates the command buffer and programs the
1327 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1328 */
1329static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1330{
1331	int ret;
1332
1333	spin_lock_init(&iommu->lock);
1334
1335	/* Add IOMMU to internal data structures */
1336	list_add_tail(&iommu->list, &amd_iommu_list);
1337	iommu->index             = amd_iommus_present++;
1338
1339	if (unlikely(iommu->index >= MAX_IOMMUS)) {
1340		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
1341		return -ENOSYS;
1342	}
1343
1344	/* Index is fine - add IOMMU to the array */
1345	amd_iommus[iommu->index] = iommu;
1346
1347	/*
1348	 * Copy data from ACPI table entry to the iommu struct
1349	 */
1350	iommu->devid   = h->devid;
1351	iommu->cap_ptr = h->cap_ptr;
1352	iommu->pci_seg = h->pci_seg;
1353	iommu->mmio_phys = h->mmio_phys;
1354
1355	switch (h->type) {
1356	case 0x10:
1357		/* Check if IVHD EFR contains proper max banks/counters */
1358		if ((h->efr_attr != 0) &&
1359		    ((h->efr_attr & (0xF << 13)) != 0) &&
1360		    ((h->efr_attr & (0x3F << 17)) != 0))
1361			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1362		else
1363			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1364		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1365			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1366		break;
1367	case 0x11:
1368	case 0x40:
1369		if (h->efr_reg & (1 << 9))
1370			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1371		else
1372			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1373		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
1374			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1375		break;
1376	default:
1377		return -EINVAL;
1378	}
1379
1380	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1381						iommu->mmio_phys_end);
1382	if (!iommu->mmio_base)
1383		return -ENOMEM;
1384
1385	if (alloc_command_buffer(iommu))
1386		return -ENOMEM;
1387
1388	if (alloc_event_buffer(iommu))
1389		return -ENOMEM;
1390
1391	iommu->int_enabled = false;
1392
1393	ret = init_iommu_from_acpi(iommu, h);
1394	if (ret)
1395		return ret;
1396
1397	ret = amd_iommu_create_irq_domain(iommu);
1398	if (ret)
1399		return ret;
1400
1401	/*
1402	 * Make sure IOMMU is not considered to translate itself. The IVRS
1403	 * table tells us so, but this is a lie!
1404	 */
1405	amd_iommu_rlookup_table[iommu->devid] = NULL;
1406
1407	return 0;
1408}
1409
1410/**
1411 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 1412 * @ivrs: Pointer to the IVRS header
 1413 *
 1414 * This function searches all IVHDs for the first IOMMU's devid and returns the highest supported IVHD type.
1415 */
1416static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1417{
1418	u8 *base = (u8 *)ivrs;
1419	struct ivhd_header *ivhd = (struct ivhd_header *)
1420					(base + IVRS_HEADER_LENGTH);
1421	u8 last_type = ivhd->type;
1422	u16 devid = ivhd->devid;
1423
1424	while (((u8 *)ivhd - base < ivrs->length) &&
1425	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1426		u8 *p = (u8 *) ivhd;
1427
1428		if (ivhd->devid == devid)
1429			last_type = ivhd->type;
1430		ivhd = (struct ivhd_header *)(p + ivhd->length);
1431	}
1432
1433	return last_type;
1434}
1435
1436/*
1437 * Iterates over all IOMMU entries in the ACPI table, allocates the
1438 * IOMMU structure and initializes it with init_iommu_one()
1439 */
1440static int __init init_iommu_all(struct acpi_table_header *table)
1441{
1442	u8 *p = (u8 *)table, *end = (u8 *)table;
1443	struct ivhd_header *h;
1444	struct amd_iommu *iommu;
1445	int ret;
1446
1447	end += table->length;
1448	p += IVRS_HEADER_LENGTH;
1449
1450	while (p < end) {
1451		h = (struct ivhd_header *)p;
1452		if (*p == amd_iommu_target_ivhd_type) {
1453
1454			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
1455				    "seg: %d flags: %01x info %04x\n",
1456				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
1457				    PCI_FUNC(h->devid), h->cap_ptr,
1458				    h->pci_seg, h->flags, h->info);
1459			DUMP_printk("       mmio-addr: %016llx\n",
1460				    h->mmio_phys);
1461
1462			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1463			if (iommu == NULL)
1464				return -ENOMEM;
1465
1466			ret = init_iommu_one(iommu, h);
1467			if (ret)
1468				return ret;
1469		}
1470		p += h->length;
1471
1472	}
1473	WARN_ON(p != end);
1474
1475	return 0;
1476}
1477
1478
1479static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1480{
1481	u64 val = 0xabcd, val2 = 0;
1482
1483	if (!iommu_feature(iommu, FEATURE_PC))
1484		return;
1485
1486	amd_iommu_pc_present = true;
1487
1488	/* Check if the performance counters can be written to */
1489	if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
1490	    (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
1491	    (val != val2)) {
1492		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
1493		amd_iommu_pc_present = false;
1494		return;
1495	}
1496
1497	pr_info("AMD-Vi: IOMMU performance counters supported\n");
1498
1499	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1500	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1501	iommu->max_counters = (u8) ((val >> 7) & 0xf);
1502}
1503
1504static ssize_t amd_iommu_show_cap(struct device *dev,
1505				  struct device_attribute *attr,
1506				  char *buf)
1507{
1508	struct amd_iommu *iommu = dev_get_drvdata(dev);
1509	return sprintf(buf, "%x\n", iommu->cap);
1510}
1511static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1512
1513static ssize_t amd_iommu_show_features(struct device *dev,
1514				       struct device_attribute *attr,
1515				       char *buf)
1516{
1517	struct amd_iommu *iommu = dev_get_drvdata(dev);
1518	return sprintf(buf, "%llx\n", iommu->features);
1519}
1520static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1521
1522static struct attribute *amd_iommu_attrs[] = {
1523	&dev_attr_cap.attr,
1524	&dev_attr_features.attr,
1525	NULL,
1526};
1527
1528static struct attribute_group amd_iommu_group = {
1529	.name = "amd-iommu",
1530	.attrs = amd_iommu_attrs,
1531};
1532
1533static const struct attribute_group *amd_iommu_groups[] = {
1534	&amd_iommu_group,
1535	NULL,
1536};
1537
1538static int iommu_init_pci(struct amd_iommu *iommu)
1539{
1540	int cap_ptr = iommu->cap_ptr;
1541	u32 range, misc, low, high;
1542	int ret;
1543
1544	iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
1545					  iommu->devid & 0xff);
1546	if (!iommu->dev)
1547		return -ENODEV;
1548
1549	/* Prevent binding other PCI device drivers to IOMMU devices */
1550	iommu->dev->match_driver = false;
1551
1552	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1553			      &iommu->cap);
1554	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
1555			      &range);
1556	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
1557			      &misc);
1558
1559	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1560		amd_iommu_iotlb_sup = false;
1561
1562	/* read extended feature bits */
1563	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
1564	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
1565
1566	iommu->features = ((u64)high << 32) | low;
1567
1568	if (iommu_feature(iommu, FEATURE_GT)) {
1569		int glxval;
1570		u32 max_pasid;
1571		u64 pasmax;
1572
1573		pasmax = iommu->features & FEATURE_PASID_MASK;
1574		pasmax >>= FEATURE_PASID_SHIFT;
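     		/* PASmax encodes the number of supported PASID bits minus one */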
1575		max_pasid  = (1 << (pasmax + 1)) - 1;
1576
1577		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1578
1579		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
1580
1581		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
1582		glxval >>= FEATURE_GLXVAL_SHIFT;
1583
1584		if (amd_iommu_max_glx_val == -1)
1585			amd_iommu_max_glx_val = glxval;
1586		else
1587			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1588	}
1589
1590	if (iommu_feature(iommu, FEATURE_GT) &&
1591	    iommu_feature(iommu, FEATURE_PPR)) {
1592		iommu->is_iommu_v2   = true;
1593		amd_iommu_v2_present = true;
1594	}
1595
1596	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1597		return -ENOMEM;
1598
1599	ret = iommu_init_ga(iommu);
1600	if (ret)
1601		return ret;
1602
1603	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1604		amd_iommu_np_cache = true;
1605
1606	init_iommu_perf_ctr(iommu);
1607
1608	if (is_rd890_iommu(iommu->dev)) {
1609		int i, j;
1610
1611		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
1612				PCI_DEVFN(0, 0));
1613
1614		/*
1615		 * Some rd890 systems may not be fully reconfigured by the
1616		 * BIOS, so it's necessary for us to store this information so
1617		 * it can be reprogrammed on resume
1618		 */
1619		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1620				&iommu->stored_addr_lo);
1621		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1622				&iommu->stored_addr_hi);
1623
1624		/* Low bit locks writes to configuration space */
1625		iommu->stored_addr_lo &= ~1;
1626
1627		for (i = 0; i < 6; i++)
1628			for (j = 0; j < 0x12; j++)
1629				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1630
1631		for (i = 0; i < 0x83; i++)
1632			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1633	}
1634
1635	amd_iommu_erratum_746_workaround(iommu);
1636	amd_iommu_ats_write_check_workaround(iommu);
1637
1638	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
1639					       amd_iommu_groups, "ivhd%d",
1640					       iommu->index);
1641
1642	return pci_enable_device(iommu->dev);
1643}
1644
1645static void print_iommu_info(void)
1646{
1647	static const char * const feat_str[] = {
1648		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1649		"IA", "GA", "HE", "PC"
1650	};
1651	struct amd_iommu *iommu;
1652
1653	for_each_iommu(iommu) {
1654		int i;
1655
1656		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
1657			dev_name(&iommu->dev->dev), iommu->cap_ptr);
1658
1659		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1660			pr_info("AMD-Vi: Extended features (%#llx):\n",
1661				iommu->features);
1662			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
1663				if (iommu_feature(iommu, (1ULL << i)))
1664					pr_cont(" %s", feat_str[i]);
1665			}
1666
1667			if (iommu->features & FEATURE_GAM_VAPIC)
1668				pr_cont(" GA_vAPIC");
1669
1670			pr_cont("\n");
1671		}
1672	}
1673	if (irq_remapping_enabled) {
1674		pr_info("AMD-Vi: Interrupt remapping enabled\n");
1675		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
1676			pr_info("AMD-Vi: virtual APIC enabled\n");
1677	}
1678}
1679
1680static int __init amd_iommu_init_pci(void)
1681{
1682	struct amd_iommu *iommu;
1683	int ret = 0;
1684
1685	for_each_iommu(iommu) {
1686		ret = iommu_init_pci(iommu);
1687		if (ret)
1688			break;
1689	}
1690
1691	/*
1692	 * Order is important here to make sure any unity map requirements are
1693	 * fulfilled. The unity mappings are created and written to the device
1694	 * table during the amd_iommu_init_api() call.
1695	 *
1696	 * After that we call init_device_table_dma() to make sure any
1697	 * uninitialized DTE will block DMA, and in the end we flush the caches
1698	 * of all IOMMUs to make sure the changes to the device table are
1699	 * active.
1700	 */
1701	ret = amd_iommu_init_api();
1702
1703	init_device_table_dma();
1704
1705	for_each_iommu(iommu)
1706		iommu_flush_all_caches(iommu);
1707
1708	if (!ret)
1709		print_iommu_info();
1710
1711	return ret;
1712}
1713
1714/****************************************************************************
1715 *
1716 * The following functions initialize the MSI interrupts for all IOMMUs
1717 * in the system. It's a bit challenging because there could be multiple
1718 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1719 * pci_dev.
1720 *
1721 ****************************************************************************/
1722
1723static int iommu_setup_msi(struct amd_iommu *iommu)
1724{
1725	int r;
1726
1727	r = pci_enable_msi(iommu->dev);
1728	if (r)
1729		return r;
1730
1731	r = request_threaded_irq(iommu->dev->irq,
1732				 amd_iommu_int_handler,
1733				 amd_iommu_int_thread,
1734				 0, "AMD-Vi",
1735				 iommu);
1736
1737	if (r) {
1738		pci_disable_msi(iommu->dev);
1739		return r;
1740	}
1741
1742	iommu->int_enabled = true;
1743
1744	return 0;
1745}
1746
1747static int iommu_init_msi(struct amd_iommu *iommu)
1748{
1749	int ret;
1750
1751	if (iommu->int_enabled)
1752		goto enable_faults;
1753
1754	if (iommu->dev->msi_cap)
1755		ret = iommu_setup_msi(iommu);
1756	else
1757		ret = -ENODEV;
1758
1759	if (ret)
1760		return ret;
1761
1762enable_faults:
1763	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
1764
1765	if (iommu->ppr_log != NULL)
1766		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
1767
1768	iommu_ga_log_enable(iommu);
1769
1770	return 0;
1771}
1772
1773/****************************************************************************
1774 *
1775 * The next functions belong to the third pass of parsing the ACPI
1776 * table. In this last pass the memory mapping requirements are
1777 * gathered (like exclusion and unity mapping ranges).
1778 *
1779 ****************************************************************************/
1780
1781static void __init free_unity_maps(void)
1782{
1783	struct unity_map_entry *entry, *next;
1784
1785	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
1786		list_del(&entry->list);
1787		kfree(entry);
1788	}
1789}
1790
1791/* called when we find an exclusion range definition in ACPI */
1792static int __init init_exclusion_range(struct ivmd_header *m)
1793{
1794	int i;
1795
1796	switch (m->type) {
1797	case ACPI_IVMD_TYPE:
1798		set_device_exclusion_range(m->devid, m);
1799		break;
1800	case ACPI_IVMD_TYPE_ALL:
1801		for (i = 0; i <= amd_iommu_last_bdf; ++i)
1802			set_device_exclusion_range(i, m);
1803		break;
1804	case ACPI_IVMD_TYPE_RANGE:
1805		for (i = m->devid; i <= m->aux; ++i)
1806			set_device_exclusion_range(i, m);
1807		break;
1808	default:
1809		break;
1810	}
1811
1812	return 0;
1813}
1814
1815/* called for unity map ACPI definition */
1816static int __init init_unity_map_range(struct ivmd_header *m)
1817{
1818	struct unity_map_entry *e = NULL;
1819	char *s;
1820
1821	e = kzalloc(sizeof(*e), GFP_KERNEL);
1822	if (e == NULL)
1823		return -ENOMEM;
1824
1825	switch (m->type) {
1826	default:
1827		kfree(e);
1828		return 0;
1829	case ACPI_IVMD_TYPE:
 1830		s = "IVMD_TYPE\t\t\t";
1831		e->devid_start = e->devid_end = m->devid;
1832		break;
1833	case ACPI_IVMD_TYPE_ALL:
1834		s = "IVMD_TYPE_ALL\t\t";
1835		e->devid_start = 0;
1836		e->devid_end = amd_iommu_last_bdf;
1837		break;
1838	case ACPI_IVMD_TYPE_RANGE:
1839		s = "IVMD_TYPE_RANGE\t\t";
1840		e->devid_start = m->devid;
1841		e->devid_end = m->aux;
1842		break;
1843	}
1844	e->address_start = PAGE_ALIGN(m->range_start);
1845	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
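     	/* The IR/IW permission bits sit directly above the unity-map flag */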
1846	e->prot = m->flags >> 1;
1847
1848	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
1849		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
1850		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
1851		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
1852		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
1853		    e->address_start, e->address_end, m->flags);
1854
1855	list_add_tail(&e->list, &amd_iommu_unity_map);
1856
1857	return 0;
1858}
1859
1860/* iterates over all memory definitions we find in the ACPI table */
1861static int __init init_memory_definitions(struct acpi_table_header *table)
1862{
1863	u8 *p = (u8 *)table, *end = (u8 *)table;
1864	struct ivmd_header *m;
1865
1866	end += table->length;
1867	p += IVRS_HEADER_LENGTH;
1868
1869	while (p < end) {
1870		m = (struct ivmd_header *)p;
1871		if (m->flags & IVMD_FLAG_EXCL_RANGE)
1872			init_exclusion_range(m);
1873		else if (m->flags & IVMD_FLAG_UNITY_MAP)
1874			init_unity_map_range(m);
1875
1876		p += m->length;
1877	}
1878
1879	return 0;
1880}
1881
1882/*
1883 * Init the device table to not allow DMA access for devices and
1884 * suppress all page faults
1885 */
1886static void init_device_table_dma(void)
1887{
1888	u32 devid;
1889
1890	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1891		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
1892		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
1893	}
1894}
1895
1896static void __init uninit_device_table_dma(void)
1897{
1898	u32 devid;
1899
1900	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1901		amd_iommu_dev_table[devid].data[0] = 0ULL;
1902		amd_iommu_dev_table[devid].data[1] = 0ULL;
1903	}
1904}
1905
1906static void init_device_table(void)
1907{
1908	u32 devid;
1909
1910	if (!amd_iommu_irq_remap)
1911		return;
1912
1913	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
1914		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
1915}
1916
1917static void iommu_init_flags(struct amd_iommu *iommu)
1918{
1919	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
1920		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
1921		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
1922
1923	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
1924		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
1925		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
1926
1927	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
1928		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
1929		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
1930
1931	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
1932		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
1933		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
1934
1935	/*
1936	 * make IOMMU memory accesses cache coherent
1937	 */
1938	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
1939
1940	/* Set IOTLB invalidation timeout to 1s */
1941	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
1942}
1943
1944static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1945{
1946	int i, j;
1947	u32 ioc_feature_control;
1948	struct pci_dev *pdev = iommu->root_pdev;
1949
1950	/* RD890 BIOSes may not have completely reconfigured the iommu */
1951	if (!is_rd890_iommu(iommu->dev) || !pdev)
1952		return;
1953
1954	/*
1955	 * First, we need to ensure that the iommu is enabled. This is
1956	 * controlled by a register in the northbridge
1957	 */
1958
1959	/* Select Northbridge indirect register 0x75 and enable writing */
1960	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
1961	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
1962
1963	/* Enable the iommu */
1964	if (!(ioc_feature_control & 0x1))
1965		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
1966
1967	/* Restore the iommu BAR */
1968	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1969			       iommu->stored_addr_lo);
1970	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
1971			       iommu->stored_addr_hi);
1972
1973	/* Restore the l1 indirect regs for each of the 6 l1s */
1974	for (i = 0; i < 6; i++)
1975		for (j = 0; j < 0x12; j++)
1976			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
1977
1978	/* Restore the l2 indirect regs */
1979	for (i = 0; i < 0x83; i++)
1980		iommu_write_l2(iommu, i, iommu->stored_l2[i]);
1981
1982	/* Lock PCI setup registers */
1983	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1984			       iommu->stored_addr_lo | 1);
1985}
1986
1987static void iommu_enable_ga(struct amd_iommu *iommu)
1988{
1989#ifdef CONFIG_IRQ_REMAP
1990	switch (amd_iommu_guest_ir) {
1991	case AMD_IOMMU_GUEST_IR_VAPIC:
1992		iommu_feature_enable(iommu, CONTROL_GAM_EN);
1993		/* Fall through */
1994	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
1995		iommu_feature_enable(iommu, CONTROL_GA_EN);
1996		iommu->irte_ops = &irte_128_ops;
1997		break;
1998	default:
1999		iommu->irte_ops = &irte_32_ops;
2000		break;
2001	}
2002#endif
2003}
2004
2005/*
2006 * This function finally enables all IOMMUs found in the system after
2007 * they have been initialized
2008 */
2009static void early_enable_iommus(void)
2010{
2011	struct amd_iommu *iommu;
2012
2013	for_each_iommu(iommu) {
2014		iommu_disable(iommu);
2015		iommu_init_flags(iommu);
2016		iommu_set_device_table(iommu);
2017		iommu_enable_command_buffer(iommu);
2018		iommu_enable_event_buffer(iommu);
2019		iommu_set_exclusion_range(iommu);
2020		iommu_enable_ga(iommu);
2021		iommu_enable(iommu);
2022		iommu_flush_all_caches(iommu);
2023	}
2024
2025#ifdef CONFIG_IRQ_REMAP
2026	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2027		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2028#endif
2029}
2030
2031static void enable_iommus_v2(void)
2032{
2033	struct amd_iommu *iommu;
2034
2035	for_each_iommu(iommu) {
2036		iommu_enable_ppr_log(iommu);
2037		iommu_enable_gt(iommu);
2038	}
2039}
2040
2041static void enable_iommus(void)
2042{
2043	early_enable_iommus();
2044
2045	enable_iommus_v2();
2046}
2047
2048static void disable_iommus(void)
2049{
2050	struct amd_iommu *iommu;
2051
2052	for_each_iommu(iommu)
2053		iommu_disable(iommu);
2054
2055#ifdef CONFIG_IRQ_REMAP
2056	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2057		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2058#endif
2059}
2060
2061/*
2062 * Suspend/Resume support
2063 * the IOMMUs are disabled on suspend and fully reprogrammed on resume
2064 */
2065
2066static void amd_iommu_resume(void)
2067{
2068	struct amd_iommu *iommu;
2069
2070	for_each_iommu(iommu)
2071		iommu_apply_resume_quirks(iommu);
2072
2073	/* re-load the hardware */
2074	enable_iommus();
2075
2076	amd_iommu_enable_interrupts();
2077}
2078
2079static int amd_iommu_suspend(void)
2080{
2081	/* disable IOMMUs so they are out of the way for the BIOS */
2082	disable_iommus();
2083
2084	return 0;
2085}
2086
2087static struct syscore_ops amd_iommu_syscore_ops = {
2088	.suspend = amd_iommu_suspend,
2089	.resume = amd_iommu_resume,
2090};
2091
2092static void __init free_on_init_error(void)
2093{
2094	kmemleak_free(irq_lookup_table);
2095	free_pages((unsigned long)irq_lookup_table,
2096		   get_order(rlookup_table_size));
2097
2098	kmem_cache_destroy(amd_iommu_irq_cache);
2099	amd_iommu_irq_cache = NULL;
2100
2101	free_pages((unsigned long)amd_iommu_rlookup_table,
2102		   get_order(rlookup_table_size));
2103
2104	free_pages((unsigned long)amd_iommu_alias_table,
2105		   get_order(alias_table_size));
2106
2107	free_pages((unsigned long)amd_iommu_dev_table,
2108		   get_order(dev_table_size));
2109
2110	free_iommu_all();
2111
2112#ifdef CONFIG_GART_IOMMU
2113	/*
2114	 * We failed to initialize the AMD IOMMU - try fallback to GART
2115	 * if possible.
2116	 */
2117	gart_iommu_init();
2118
2119#endif
2120}
2121
2122/* SB IOAPIC is always on this device in AMD systems */
2123#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
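/*
 * Worked example of the encoding above: bus 0x00, slot 0x14, function 0
 * gives PCI_DEVFN(0x14, 0) = 0xa0, so IOAPIC_SB_DEVID is 0x00a0, i.e.
 * PCI device 00:14.0.
 */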
2124
2125static bool __init check_ioapic_information(void)
2126{
2127	const char *fw_bug = FW_BUG;
2128	bool ret, has_sb_ioapic;
2129	int idx;
2130
2131	has_sb_ioapic = false;
2132	ret           = false;
2133
2134	/*
2135	 * If we have map overrides on the kernel command line the
2136	 * messages in this function might not describe firmware bugs
2137	 * anymore - so be careful
2138	 */
2139	if (cmdline_maps)
2140		fw_bug = "";
2141
2142	for (idx = 0; idx < nr_ioapics; idx++) {
2143		int devid, id = mpc_ioapic_id(idx);
2144
2145		devid = get_ioapic_devid(id);
2146		if (devid < 0) {
2147			pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
2148				fw_bug, id);
2149			ret = false;
2150		} else if (devid == IOAPIC_SB_DEVID) {
2151			has_sb_ioapic = true;
2152			ret           = true;
2153		}
2154	}
2155
2156	if (!has_sb_ioapic) {
2157		/*
2158		 * We expect the SB IOAPIC to be listed in the IVRS
2159		 * table. The system timer is connected to the SB IOAPIC
2160		 * and if we don't have it in the list the system will
2161		 * panic at boot time.  This situation usually happens
2162		 * when the BIOS is buggy and provides us the wrong
2163		 * device id for the IOAPIC in the system.
2164		 */
2165		pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
2166	}
2167
2168	if (!ret)
2169		pr_err("AMD-Vi: Disabling interrupt remapping\n");
2170
2171	return ret;
2172}
2173
2174static void __init free_dma_resources(void)
2175{
2176	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2177		   get_order(MAX_DOMAIN_ID/8));
2178
2179	free_unity_maps();
2180}
2181
2182/*
2183 * This is the hardware init function for AMD IOMMU in the system.
2184 * This function is called either from amd_iommu_init or from the interrupt
2185 * remapping setup code.
2186 *
2187 * This function parses the ACPI table for the AMD IOMMU (IVRS)
2188 * four times:
2189 *
2190 *	Pass 1) Discover the most comprehensive IVHD type to use.
2191 *
2192 *	Pass 2) Find the highest PCI device id the driver has to handle.
2193 *		Based on this information, the sizes of the data
2194 *		structures that need to be allocated are determined.
2195 *
2196 *	Pass 3) Initialize the just-allocated data structures with the
2197 *		information about the available AMD IOMMUs from the
2198 *		ACPI table. This pass also maps the PCI devices in the
2199 *		system to specific IOMMUs.
2200 *
2201 *	Pass 4) After the basic data structures are allocated and
2202 *		initialized, update them with information about memory
2203 *		remapping requirements parsed out of the ACPI table in
2204 *		this last pass.
2205 *
2206 * After everything is set up the IOMMUs are enabled and the necessary
2207 * hotplug and suspend notifiers are registered.
2208 */
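/*
 * In the function below the four passes correspond, in order, to the
 * calls to get_highest_supported_ivhd_type(), find_last_devid_acpi(),
 * init_iommu_all() and init_memory_definitions().
 */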
2209static int __init early_amd_iommu_init(void)
2210{
2211	struct acpi_table_header *ivrs_base;
2212	acpi_status status;
2213	int i, remap_cache_sz, ret = 0;
2214
2215	if (!amd_iommu_detected)
2216		return -ENODEV;
2217
2218	status = acpi_get_table("IVRS", 0, &ivrs_base);
2219	if (status == AE_NOT_FOUND)
2220		return -ENODEV;
2221	else if (ACPI_FAILURE(status)) {
2222		const char *err = acpi_format_exception(status);
2223		pr_err("AMD-Vi: IVRS table error: %s\n", err);
2224		return -EINVAL;
2225	}
2226
2227	/*
2228	 * Validate checksum here so we don't need to do it when
2229	 * we actually parse the table
2230	 */
2231	ret = check_ivrs_checksum(ivrs_base);
2232	if (ret)
2233		return ret;
2234
2235	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2236	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2237
2238	/*
2239	 * First parse ACPI tables to find the largest Bus/Dev/Func
2240	 * we need to handle. Based on this information, the shared data
2241	 * structures for the IOMMUs in the system will be allocated
2242	 */
2243	ret = find_last_devid_acpi(ivrs_base);
2244	if (ret)
2245		goto out;
2246
2247	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
2248	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2249	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2250
2251	/* Device table - directly used by all IOMMUs */
2252	ret = -ENOMEM;
2253	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2254				      get_order(dev_table_size));
2255	if (amd_iommu_dev_table == NULL)
2256		goto out;
2257
2258	/*
2259	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
2260	 * IOMMUs see for that device
2261	 */
2262	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2263			get_order(alias_table_size));
2264	if (amd_iommu_alias_table == NULL)
2265		goto out;
2266
2267	/* IOMMU rlookup table - find the IOMMU for a specific device */
2268	amd_iommu_rlookup_table = (void *)__get_free_pages(
2269			GFP_KERNEL | __GFP_ZERO,
2270			get_order(rlookup_table_size));
2271	if (amd_iommu_rlookup_table == NULL)
2272		goto out;
2273
2274	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2275					    GFP_KERNEL | __GFP_ZERO,
2276					    get_order(MAX_DOMAIN_ID/8));
2277	if (amd_iommu_pd_alloc_bitmap == NULL)
2278		goto out;
2279
2280	/*
2281	 * let all alias entries point to themselves
2282	 */
2283	for (i = 0; i <= amd_iommu_last_bdf; ++i)
2284		amd_iommu_alias_table[i] = i;
2285
2286	/*
2287	 * never allocate domain 0 because it's used as the non-allocated and
2288	 * error value placeholder
2289	 */
2290	__set_bit(0, amd_iommu_pd_alloc_bitmap);
2291
2292	spin_lock_init(&amd_iommu_pd_lock);
2293
2294	/*
2295	 * now the data structures are allocated and basically initialized,
2296	 * start the real acpi table scan
2297	 */
2298	ret = init_iommu_all(ivrs_base);
2299	if (ret)
2300		goto out;
2301
2302	if (amd_iommu_irq_remap)
2303		amd_iommu_irq_remap = check_ioapic_information();
2304
2305	if (amd_iommu_irq_remap) {
2306		/*
2307		 * Interrupt remapping enabled, create kmem_cache for the
2308		 * remapping tables.
2309		 */
2310		ret = -ENOMEM;
2311		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2312			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2313		else
2314			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
2315		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
2316							remap_cache_sz,
2317							IRQ_TABLE_ALIGNMENT,
2318							0, NULL);
2319		if (!amd_iommu_irq_cache)
2320			goto out;
2321
2322		irq_lookup_table = (void *)__get_free_pages(
2323				GFP_KERNEL | __GFP_ZERO,
2324				get_order(rlookup_table_size));
2325		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2326			       1, GFP_KERNEL);
2327		if (!irq_lookup_table)
2328			goto out;
2329	}
2330
2331	ret = init_memory_definitions(ivrs_base);
2332	if (ret)
2333		goto out;
2334
2335	/* init the device table */
2336	init_device_table();
2337
2338out:
2339	/* Don't leak any ACPI memory */
2340	acpi_put_table(ivrs_base);
2341	ivrs_base = NULL;
2342
2343	return ret;
2344}
2345
2346static int amd_iommu_enable_interrupts(void)
2347{
2348	struct amd_iommu *iommu;
2349	int ret = 0;
2350
2351	for_each_iommu(iommu) {
2352		ret = iommu_init_msi(iommu);
2353		if (ret)
2354			goto out;
2355	}
2356
2357out:
2358	return ret;
2359}
2360
2361static bool detect_ivrs(void)
2362{
2363	struct acpi_table_header *ivrs_base;
2364	acpi_status status;
2365
2366	status = acpi_get_table("IVRS", 0, &ivrs_base);
2367	if (status == AE_NOT_FOUND)
2368		return false;
2369	else if (ACPI_FAILURE(status)) {
2370		const char *err = acpi_format_exception(status);
2371		pr_err("AMD-Vi: IVRS table error: %s\n", err);
2372		return false;
2373	}
2374
2375	acpi_put_table(ivrs_base);
2376
2377	/* Make sure ACS will be enabled during PCI probe */
2378	pci_request_acs();
2379
2380	return true;
2381}
2382
2383/****************************************************************************
2384 *
2385 * AMD IOMMU Initialization State Machine
2386 *
2387 ****************************************************************************/
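/*
 * For reference, the normal state progression driven by state_next() is:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND and IOMMU_INIT_ERROR are terminal error states.
 */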
2388
2389static int __init state_next(void)
2390{
2391	int ret = 0;
2392
2393	switch (init_state) {
2394	case IOMMU_START_STATE:
2395		if (!detect_ivrs()) {
2396			init_state	= IOMMU_NOT_FOUND;
2397			ret		= -ENODEV;
2398		} else {
2399			init_state	= IOMMU_IVRS_DETECTED;
2400		}
2401		break;
2402	case IOMMU_IVRS_DETECTED:
2403		ret = early_amd_iommu_init();
2404		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2405		break;
2406	case IOMMU_ACPI_FINISHED:
2407		early_enable_iommus();
2408		register_syscore_ops(&amd_iommu_syscore_ops);
2409		x86_platform.iommu_shutdown = disable_iommus;
2410		init_state = IOMMU_ENABLED;
2411		break;
2412	case IOMMU_ENABLED:
2413		ret = amd_iommu_init_pci();
2414		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2415		enable_iommus_v2();
2416		break;
2417	case IOMMU_PCI_INIT:
2418		ret = amd_iommu_enable_interrupts();
2419		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2420		break;
2421	case IOMMU_INTERRUPTS_EN:
2422		ret = amd_iommu_init_dma_ops();
2423		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2424		break;
2425	case IOMMU_DMA_OPS:
2426		init_state = IOMMU_INITIALIZED;
2427		break;
2428	case IOMMU_INITIALIZED:
2429		/* Nothing to do */
2430		break;
2431	case IOMMU_NOT_FOUND:
2432	case IOMMU_INIT_ERROR:
2433		/* Error states => do nothing */
2434		ret = -EINVAL;
2435		break;
2436	default:
2437		/* Unknown state */
2438		BUG();
2439	}
2440
2441	return ret;
2442}
2443
2444static int __init iommu_go_to_state(enum iommu_init_state state)
2445{
2446	int ret = 0;
2447
2448	while (init_state != state) {
2449		ret = state_next();
2450		if (init_state == IOMMU_NOT_FOUND ||
2451		    init_state == IOMMU_INIT_ERROR)
2452			break;
2453	}
2454
2455	return ret;
2456}
2457
2458#ifdef CONFIG_IRQ_REMAP
2459int __init amd_iommu_prepare(void)
2460{
2461	int ret;
2462
2463	amd_iommu_irq_remap = true;
2464
2465	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2466	if (ret)
2467		return ret;
2468	return amd_iommu_irq_remap ? 0 : -ENODEV;
2469}
2470
2471int __init amd_iommu_enable(void)
2472{
2473	int ret;
2474
2475	ret = iommu_go_to_state(IOMMU_ENABLED);
2476	if (ret)
2477		return ret;
2478
2479	irq_remapping_enabled = 1;
2480
2481	return 0;
2482}
2483
2484void amd_iommu_disable(void)
2485{
2486	amd_iommu_suspend();
2487}
2488
2489int amd_iommu_reenable(int mode)
2490{
2491	amd_iommu_resume();
2492
2493	return 0;
2494}
2495
2496int __init amd_iommu_enable_faulting(void)
2497{
2498	/* We enable MSI later when PCI is initialized */
2499	return 0;
2500}
2501#endif
2502
2503/*
2504 * This is the core init function for AMD IOMMU hardware in the system.
2505 * This function is called from the generic x86 DMA layer initialization
2506 * code.
2507 */
2508static int __init amd_iommu_init(void)
2509{
2510	int ret;
2511
2512	ret = iommu_go_to_state(IOMMU_INITIALIZED);
2513	if (ret) {
2514		free_dma_resources();
2515		if (!irq_remapping_enabled) {
2516			disable_iommus();
2517			free_on_init_error();
2518		} else {
2519			struct amd_iommu *iommu;
2520
2521			uninit_device_table_dma();
2522			for_each_iommu(iommu)
2523				iommu_flush_all_caches(iommu);
2524		}
2525	}
2526
2527	return ret;
2528}
2529
2530/****************************************************************************
2531 *
2532 * Early detect code. This code runs at IOMMU detection time in the DMA
2533 * layer. It just checks whether there is an IVRS ACPI table to detect
2534 * AMD IOMMUs.
2535 *
2536 ****************************************************************************/
2537int __init amd_iommu_detect(void)
2538{
2539	int ret;
2540
2541	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
2542		return -ENODEV;
2543
2544	if (amd_iommu_disabled)
2545		return -ENODEV;
2546
2547	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2548	if (ret)
2549		return ret;
2550
2551	amd_iommu_detected = true;
2552	iommu_detected = 1;
2553	x86_init.iommu.iommu_init = amd_iommu_init;
2554
2555	return 1;
2556}
2557
2558/****************************************************************************
2559 *
2560 * Parsing functions for the AMD IOMMU specific kernel command line
2561 * options.
2562 *
2563 ****************************************************************************/
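/*
 * Example invocations (formats taken from the parsers below; the IDs and
 * bus/device/function values are illustrative only):
 *
 *   amd_iommu=fullflush
 *   amd_iommu_intr=vapic
 *   ivrs_ioapic[9]=00:14.0
 *   ivrs_hpet[0]=00:14.0
 *   ivrs_acpihid[00:14.5]=AMD0020:0
 */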
2564
2565static int __init parse_amd_iommu_dump(char *str)
2566{
2567	amd_iommu_dump = true;
2568
2569	return 1;
2570}
2571
2572static int __init parse_amd_iommu_intr(char *str)
2573{
2574	for (; *str; ++str) {
2575		if (strncmp(str, "legacy", 6) == 0) {
2576			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
2577			break;
2578		}
2579		if (strncmp(str, "vapic", 5) == 0) {
2580			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2581			break;
2582		}
2583	}
2584	return 1;
2585}
2586
2587static int __init parse_amd_iommu_options(char *str)
2588{
2589	for (; *str; ++str) {
2590		if (strncmp(str, "fullflush", 9) == 0)
2591			amd_iommu_unmap_flush = true;
2592		if (strncmp(str, "off", 3) == 0)
2593			amd_iommu_disabled = true;
2594		if (strncmp(str, "force_isolation", 15) == 0)
2595			amd_iommu_force_isolation = true;
2596	}
2597
2598	return 1;
2599}
2600
2601static int __init parse_ivrs_ioapic(char *str)
2602{
2603	unsigned int bus, dev, fn;
2604	int ret, id, i;
2605	u16 devid;
2606
2607	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2608
2609	if (ret != 4) {
2610		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
2611		return 1;
2612	}
2613
2614	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
2615		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
2616			str);
2617		return 1;
2618	}
2619
2620	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2621
2622	cmdline_maps			= true;
2623	i				= early_ioapic_map_size++;
2624	early_ioapic_map[i].id		= id;
2625	early_ioapic_map[i].devid	= devid;
2626	early_ioapic_map[i].cmd_line	= true;
2627
2628	return 1;
2629}
2630
2631static int __init parse_ivrs_hpet(char *str)
2632{
2633	unsigned int bus, dev, fn;
2634	int ret, id, i;
2635	u16 devid;
2636
2637	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2638
2639	if (ret != 4) {
2640		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
2641		return 1;
2642	}
2643
2644	if (early_hpet_map_size == EARLY_MAP_SIZE) {
2645		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
2646			str);
2647		return 1;
2648	}
2649
2650	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2651
2652	cmdline_maps			= true;
2653	i				= early_hpet_map_size++;
2654	early_hpet_map[i].id		= id;
2655	early_hpet_map[i].devid		= devid;
2656	early_hpet_map[i].cmd_line	= true;
2657
2658	return 1;
2659}
2660
2661static int __init parse_ivrs_acpihid(char *str)
2662{
2663	u32 bus, dev, fn;
2664	char *hid, *uid, *p;
2665	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
2666	int ret, i;
2667
2668	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
2669	if (ret != 4) {
2670		pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
2671		return 1;
2672	}
2673
2674	p = acpiid;
2675	hid = strsep(&p, ":");
2676	uid = p;
2677
2678	if (!hid || !(*hid) || !uid) {
2679		pr_err("AMD-Vi: Invalid command line: hid or uid\n");
2680		return 1;
2681	}
2682
2683	i = early_acpihid_map_size++;
2684	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
2685	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
2686	early_acpihid_map[i].devid =
2687		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2688	early_acpihid_map[i].cmd_line	= true;
2689
2690	return 1;
2691}
2692
2693__setup("amd_iommu_dump",	parse_amd_iommu_dump);
2694__setup("amd_iommu=",		parse_amd_iommu_options);
2695__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
2696__setup("ivrs_ioapic",		parse_ivrs_ioapic);
2697__setup("ivrs_hpet",		parse_ivrs_hpet);
2698__setup("ivrs_acpihid",		parse_ivrs_acpihid);
2699
2700IOMMU_INIT_FINISH(amd_iommu_detect,
2701		  gart_iommu_hole_init,
2702		  NULL,
2703		  NULL);
2704
2705bool amd_iommu_v2_supported(void)
2706{
2707	return amd_iommu_v2_present;
2708}
2709EXPORT_SYMBOL(amd_iommu_v2_supported);
2710
2711/****************************************************************************
2712 *
2713 * IOMMU EFR Performance Counter support. This code provides access to
2714 * the IOMMU PC (performance counter) functionality.
2715 *
2716 ****************************************************************************/
2717
2718u8 amd_iommu_pc_get_max_banks(u16 devid)
2719{
2720	struct amd_iommu *iommu;
2721	u8 ret = 0;
2722
2723	/* locate the iommu governing the devid */
2724	iommu = amd_iommu_rlookup_table[devid];
2725	if (iommu)
2726		ret = iommu->max_banks;
2727
2728	return ret;
2729}
2730EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
2731
2732bool amd_iommu_pc_supported(void)
2733{
2734	return amd_iommu_pc_present;
2735}
2736EXPORT_SYMBOL(amd_iommu_pc_supported);
2737
2738u8 amd_iommu_pc_get_max_counters(u16 devid)
2739{
2740	struct amd_iommu *iommu;
2741	u8 ret = 0;
2742
2743	/* locate the iommu governing the devid */
2744	iommu = amd_iommu_rlookup_table[devid];
2745	if (iommu)
2746		ret = iommu->max_counters;
2747
2748	return ret;
2749}
2750EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
2751
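/*
 * Counter register offsets are computed as
 * ((0x40 | bank) << 12) | (cntr << 8) | fxn, so e.g. bank 0, counter 0,
 * function 0 gives offset 0x40000, which is assumed here to be the start
 * of the performance counter MMIO region (MMIO_CNTR_REG_OFFSET).
 */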
2752static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
2753				    u8 bank, u8 cntr, u8 fxn,
2754				    u64 *value, bool is_write)
2755{
2756	u32 offset;
2757	u32 max_offset_lim;
2758
2759	/* Check for valid iommu and pc register indexing */
2760	if (WARN_ON((fxn > 0x28) || (fxn & 7)))
2761		return -ENODEV;
2762
2763	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
2764
2765	/* Limit the offset to the hw defined mmio region aperture */
2766	max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
2767				(iommu->max_counters << 8) | 0x28);
2768	if ((offset < MMIO_CNTR_REG_OFFSET) ||
2769	    (offset > max_offset_lim))
2770		return -EINVAL;
2771
2772	if (is_write) {
2773		writel((u32)*value, iommu->mmio_base + offset);
2774		writel((*value >> 32), iommu->mmio_base + offset + 4);
2775	} else {
2776		*value = readl(iommu->mmio_base + offset + 4);
2777		*value <<= 32;
2778		*value |= readl(iommu->mmio_base + offset);
2779	}
2780
2781	return 0;
2782}
2783EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
2784
2785int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
2786				    u64 *value, bool is_write)
2787{
2788	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
2789
2790	/* Make sure the IOMMU PC resource is available */
2791	if (!amd_iommu_pc_present || iommu == NULL)
2792		return -ENODEV;
2793
2794	return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
2795					value, is_write);
2796}