/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL		0x48

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr;
} __attribute__((packed));
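
/*
 * Editor's note (illustrative size check, not in the original source):
 * with the packed attribute the fields above add up to
 * 1+1+2+2+2+8+2+2+4 = 24 bytes, the fixed-length part of a type-0x10
 * IVHD block, so the struct can be laid directly over the raw table:
 *
 *	BUILD_BUG_ON(sizeof(struct ivhd_header) != 24);
 */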

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));
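
/*
 * Editor's note (illustrative, same idea as above): the packed IVMD
 * layout is 1+1+2+2+2+8+8+8 = 32 bytes, the fixed size of an IVMD
 * block in the IVRS table:
 *
 *	BUILD_BUG_ON(sizeof(struct ivmd_header) != 32);
 */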

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver-specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;
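
/*
 * Editor's note (hypothetical example): a PCIe-to-PCI bridge issues DMA
 * on behalf of the conventional-PCI devices behind it, so an IVHD alias
 * entry can map such a device onto the bridge's requestor id,
 * conceptually:
 *
 *	amd_iommu_alias_table[devid] = devid_of_bridge;
 *
 * Until an alias entry is parsed, every entry points to itself (see the
 * identity initialization in early_amd_iommu_init() below).
 */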

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
				    u8 bank, u8 cntr, u8 fxn,
				    u64 *value, bool is_write);

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
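
/*
 * Worked example (editor's note, assuming 4 KiB pages and a 32-byte
 * device table entry): with amd_iommu_last_bdf = 0xffff,
 * (0xffff + 1) * 32 = 2 MiB, get_order(2 MiB) = 9, so tbl_size()
 * returns 1UL << (12 + 9) = 2 MiB - the allocation is rounded up to a
 * whole power-of-two number of pages.
 */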

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
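
/*
 * Editor's note: these helpers implement an index/data indirection - the
 * register index goes into one config-space dword (0xf8 for L1, 0xf0 for
 * L2, with bit 31 resp. bit 8 acting as write-enable) and the data moves
 * through a second dword (0xfc resp. 0xf4). A typical read-modify-write,
 * as done by the ATS erratum workaround further below, looks like:
 *
 *	u32 val = iommu_read_l2(iommu, 0x47);
 *	iommu_write_l2(iommu, 0x47, val | BIT(0));
 */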

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}
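
/*
 * Worked example (editor's note): the low bits of the device table base
 * register encode the table size in 4 KiB pages, minus one. For the
 * 2 MiB table from the tbl_size() example above,
 * (dev_table_size >> 12) - 1 = (0x200000 >> 12) - 1 = 511, OR-ed into
 * the page-aligned base address.
 */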

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}
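
/*
 * Worked example (editor's note): the top two bits of the entry type
 * select the entry size. For IVHD_DEV_SELECT (0x02), 0x02 >> 6 = 0 and
 * the entry is 0x04 << 0 = 4 bytes; for IVHD_DEV_ALIAS (0x42),
 * 0x42 >> 6 = 1 and the entry is 0x04 << 1 = 8 bytes.
 */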

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether there is a higher device id defined in the
 * ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
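
/*
 * Worked example (editor's note): a device table entry is an array of
 * 64-bit words, so the bit number addresses a flat bit field. For
 * bit 96: i = (96 >> 6) & 0x03 = 1 and _bit = 96 & 0x3f = 32, i.e.
 * bit 32 of data[1].
 */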

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}
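
/*
 * Editor's note (illustrative reading): sysmgt is a 2-bit value built
 * from the two SYSMGT device-table bits. The workaround only triggers
 * for sysmgt == 0x01 (SYSMGT1 set, SYSMGT2 clear), in which case the
 * entry additionally gets the IW (write permission) bit set.
 */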

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We can only configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >>  8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index             = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	/* Check if IVHD EFR contains proper max banks/counters */
	if ((h->efr != 0) &&
	    ((h->efr & (0xF << 13)) != 0) &&
	    ((h->efr & (0x3F << 17)) != 0)) {
		iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
	} else {
		iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	return 0;
}

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val = 0xabcd, val2 = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* Check if the performance counters can be written to */
	if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
	    (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
	    (val != val2)) {
		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
		amd_iommu_pc_present = false;
		return;
	}

	pr_info("AMD-Vi: IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;

	iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
					  iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid  = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
				PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				&iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				&iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
					       amd_iommu_groups, "ivhd%d",
					       iommu->index);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi:  Extended features: ");
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}
			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled)
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	ret = amd_iommu_init_api();

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}
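
/*
 * Editor's note (illustrative, assuming the IVMD flag layout with the
 * read/write permission bits directly above the unity bit):
 * e->prot = m->flags >> 1 keeps those permission bits as the unity
 * mapping's protection value, e.g. flags = 0x07 (unity + read + write)
 * yields prot = 0x3.
 */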

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);
	}
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_on_init_error(void)
{
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret           = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
				fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret           = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time.  This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("AMD-Vi: Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_unity_maps();
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
1840static int __init early_amd_iommu_init(void)
1841{
1842	struct acpi_table_header *ivrs_base;
1843	acpi_size ivrs_size;
1844	acpi_status status;
1845	int i, ret = 0;
1846
1847	if (!amd_iommu_detected)
1848		return -ENODEV;
1849
1850	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
1851	if (status == AE_NOT_FOUND)
1852		return -ENODEV;
1853	else if (ACPI_FAILURE(status)) {
1854		const char *err = acpi_format_exception(status);
1855		pr_err("AMD-Vi: IVRS table error: %s\n", err);
1856		return -EINVAL;
1857	}
1858
1859	/*
 
 
 
 
 
 
 
 
 
 
 
1860	 * First parse ACPI tables to find the largest Bus/Dev/Func
1861	 * we need to handle. Upon this information the shared data
1862	 * structures for the IOMMUs in the system will be allocated
1863	 */
1864	ret = find_last_devid_acpi(ivrs_base);
1865	if (ret)
1866		goto out;
1867
1868	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
1869	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
1870	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
1871
1872	/* Device table - directly used by all IOMMUs */
1873	ret = -ENOMEM;
1874	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 
1875				      get_order(dev_table_size));
1876	if (amd_iommu_dev_table == NULL)
1877		goto out;
1878
1879	/*
1880	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
1881	 * IOMMU see for that device
1882	 */
1883	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
1884			get_order(alias_table_size));
1885	if (amd_iommu_alias_table == NULL)
1886		goto out;
1887
1888	/* IOMMU rlookup table - find the IOMMU for a specific device */
1889	amd_iommu_rlookup_table = (void *)__get_free_pages(
1890			GFP_KERNEL | __GFP_ZERO,
1891			get_order(rlookup_table_size));
1892	if (amd_iommu_rlookup_table == NULL)
1893		goto out;
1894
1895	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
1896					    GFP_KERNEL | __GFP_ZERO,
1897					    get_order(MAX_DOMAIN_ID/8));
1898	if (amd_iommu_pd_alloc_bitmap == NULL)
1899		goto out;
1900
1901	/*
1902	 * Let all alias entries point to themselves.
1903	 */
1904	for (i = 0; i <= amd_iommu_last_bdf; ++i)
1905		amd_iommu_alias_table[i] = i;
1906
1907	/*
1908	 * Never allocate domain 0 because it's used as the non-allocated and
1909	 * error value placeholder
1910	 */
1911	amd_iommu_pd_alloc_bitmap[0] = 1;
1912
1913	spin_lock_init(&amd_iommu_pd_lock);
1914
1915	/*
1916	 * Now that the data structures are allocated and basically
1917	 * initialized, start the real ACPI table scan.
1918	 */
1919	ret = init_iommu_all(ivrs_base);
1920	if (ret)
1921		goto out;
1922
1923	if (amd_iommu_irq_remap)
1924		amd_iommu_irq_remap = check_ioapic_information();
1925
1926	if (amd_iommu_irq_remap) {
1927		/*
1928		 * Interrupt remapping enabled, create kmem_cache for the
1929		 * remapping tables.
1930		 */
1931		ret = -ENOMEM;
1932		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
1933				MAX_IRQS_PER_TABLE * sizeof(u32),
1934				IRQ_TABLE_ALIGNMENT,
1935				0, NULL);
1936		if (!amd_iommu_irq_cache)
1937			goto out;
1938
1939		irq_lookup_table = (void *)__get_free_pages(
1940				GFP_KERNEL | __GFP_ZERO,
1941				get_order(rlookup_table_size));
1942		if (!irq_lookup_table)
1943			goto out;
1944	}
1945
1946	ret = init_memory_definitions(ivrs_base);
1947	if (ret)
1948		goto out;
1949
1950	/* init the device table */
1951	init_device_table();
1952
1953out:
1954	/* Don't leak any ACPI memory */
1955	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
1956	ivrs_base = NULL;
1957
1958	return ret;
1959}
1960
1961static int amd_iommu_enable_interrupts(void)
1962{
1963	struct amd_iommu *iommu;
1964	int ret = 0;
1965
1966	for_each_iommu(iommu) {
1967		ret = iommu_init_msi(iommu);
1968		if (ret)
1969			goto out;
1970	}
1971
1972out:
1973	return ret;
1974}
1975
1976static bool detect_ivrs(void)
1977{
1978	struct acpi_table_header *ivrs_base;
1979	acpi_size ivrs_size;
1980	acpi_status status;
1981
1982	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
1983	if (status == AE_NOT_FOUND)
1984		return false;
1985	else if (ACPI_FAILURE(status)) {
1986		const char *err = acpi_format_exception(status);
1987		pr_err("AMD-Vi: IVRS table error: %s\n", err);
1988		return false;
1989	}
1990
1991	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
1992
1993	/* Make sure ACS will be enabled during PCI probe */
1994	pci_request_acs();
1995
1996	return true;
1997}
1998
1999/****************************************************************************
2000 *
2001 * AMD IOMMU Initialization State Machine
2002 *
2003 ****************************************************************************/
2004
2005static int __init state_next(void)
2006{
2007	int ret = 0;
2008
2009	switch (init_state) {
2010	case IOMMU_START_STATE:
2011		if (!detect_ivrs()) {
2012			init_state	= IOMMU_NOT_FOUND;
2013			ret		= -ENODEV;
2014		} else {
2015			init_state	= IOMMU_IVRS_DETECTED;
2016		}
2017		break;
2018	case IOMMU_IVRS_DETECTED:
2019		ret = early_amd_iommu_init();
2020		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2021		break;
2022	case IOMMU_ACPI_FINISHED:
2023		early_enable_iommus();
2024		register_syscore_ops(&amd_iommu_syscore_ops);
2025		x86_platform.iommu_shutdown = disable_iommus;
2026		init_state = IOMMU_ENABLED;
2027		break;
2028	case IOMMU_ENABLED:
2029		ret = amd_iommu_init_pci();
2030		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2031		enable_iommus_v2();
2032		break;
2033	case IOMMU_PCI_INIT:
2034		ret = amd_iommu_enable_interrupts();
2035		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2036		break;
2037	case IOMMU_INTERRUPTS_EN:
2038		ret = amd_iommu_init_dma_ops();
2039		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2040		break;
2041	case IOMMU_DMA_OPS:
2042		init_state = IOMMU_INITIALIZED;
2043		break;
2044	case IOMMU_INITIALIZED:
2045		/* Nothing to do */
2046		break;
2047	case IOMMU_NOT_FOUND:
2048	case IOMMU_INIT_ERROR:
2049		/* Error states => do nothing */
2050		ret = -EINVAL;
2051		break;
2052	default:
2053		/* Unknown state */
2054		BUG();
2055	}
2056
2057	return ret;
2058}
2059
2060static int __init iommu_go_to_state(enum iommu_init_state state)
2061{
2062	int ret = 0;
2063
2064	while (init_state != state) {
2065		ret = state_next();
2066		if (init_state == IOMMU_NOT_FOUND ||
2067		    init_state == IOMMU_INIT_ERROR)
2068			break;
2069	}
2070
2071	return ret;
2072}
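/*
 * On a fully successful initialization the state machine above steps
 * through
 *
 *	IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *	IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *	IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * Any failure diverts to IOMMU_NOT_FOUND or IOMMU_INIT_ERROR, and
 * iommu_go_to_state() stops transitioning once either is reached.
 */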
2073
2074#ifdef CONFIG_IRQ_REMAP
2075int __init amd_iommu_prepare(void)
2076{
2077	int ret;
2078
2079	amd_iommu_irq_remap = true;
2080
2081	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2082	if (ret)
2083		return ret;
2084	return amd_iommu_irq_remap ? 0 : -ENODEV;
2085}
2086
2087int __init amd_iommu_enable(void)
2088{
2089	int ret;
2090
2091	ret = iommu_go_to_state(IOMMU_ENABLED);
2092	if (ret)
2093		return ret;
2094
2095	irq_remapping_enabled = 1;
2096
2097	return 0;
2098}
2099
2100void amd_iommu_disable(void)
2101{
2102	amd_iommu_suspend();
2103}
2104
2105int amd_iommu_reenable(int mode)
2106{
2107	amd_iommu_resume();
2108
2109	return 0;
2110}
2111
2112int __init amd_iommu_enable_faulting(void)
2113{
2114	/* We enable MSI later when PCI is initialized */
2115	return 0;
2116}
2117#endif
2118
2119/*
2120 * This is the core init function for AMD IOMMU hardware in the system.
2121 * This function is called from the generic x86 DMA layer initialization
2122 * code.
2123 */
2124static int __init amd_iommu_init(void)
2125{
2126	int ret;
2127
2128	ret = iommu_go_to_state(IOMMU_INITIALIZED);
2129	if (ret) {
2130		free_dma_resources();
2131		if (!irq_remapping_enabled) {
2132			disable_iommus();
2133			free_on_init_error();
2134		} else {
2135			struct amd_iommu *iommu;
2136
2137			uninit_device_table_dma();
2138			for_each_iommu(iommu)
2139				iommu_flush_all_caches(iommu);
2140		}
2141	}
2142
2143	return ret;
2144}
2145
2146/****************************************************************************
2147 *
2148 * Early detect code. This code runs at IOMMU detection time in the DMA
2149 * layer. It simply checks whether an IVRS ACPI table is present in
2150 * order to detect AMD IOMMUs.
2151 *
2152 ****************************************************************************/
2153int __init amd_iommu_detect(void)
2154{
2155	int ret;
2156
2157	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
2158		return -ENODEV;
2159
2160	if (amd_iommu_disabled)
2161		return -ENODEV;
2162
2163	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2164	if (ret)
2165		return ret;
2166
2167	amd_iommu_detected = true;
2168	iommu_detected = 1;
2169	x86_init.iommu.iommu_init = amd_iommu_init;
2170
2171	return 1;
2172}
2173
2174/****************************************************************************
2175 *
2176 * Parsing functions for the AMD IOMMU specific kernel command line
2177 * options.
2178 *
2179 ****************************************************************************/
2180
2181static int __init parse_amd_iommu_dump(char *str)
2182{
2183	amd_iommu_dump = true;
2184
2185	return 1;
2186}
2187
2188static int __init parse_amd_iommu_options(char *str)
2189{
2190	for (; *str; ++str) {
2191		if (strncmp(str, "fullflush", 9) == 0)
2192			amd_iommu_unmap_flush = true;
2193		if (strncmp(str, "off", 3) == 0)
2194			amd_iommu_disabled = true;
2195		if (strncmp(str, "force_isolation", 15) == 0)
2196			amd_iommu_force_isolation = true;
2197	}
2198
2199	return 1;
2200}
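/*
 * Usage example (illustrative): booting with "amd_iommu=fullflush" sets
 * amd_iommu_unmap_flush. Because the loop above re-tries the keywords at
 * every character offset, a combined string such as
 * "amd_iommu=fullflush,force_isolation" matches both options.
 */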
2201
2202static int __init parse_ivrs_ioapic(char *str)
2203{
2204	unsigned int bus, dev, fn;
2205	int ret, id, i;
2206	u16 devid;
2207
2208	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2209
2210	if (ret != 4) {
2211		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
2212		return 1;
2213	}
2214
2215	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
2216		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
2217			str);
2218		return 1;
2219	}
2220
2221	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2222
2223	cmdline_maps			= true;
2224	i				= early_ioapic_map_size++;
2225	early_ioapic_map[i].id		= id;
2226	early_ioapic_map[i].devid	= devid;
2227	early_ioapic_map[i].cmd_line	= true;
2228
2229	return 1;
2230}
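/*
 * Worked example (illustrative values): "ivrs_ioapic[32]=00:14.0" is
 * parsed into id=32, bus=0x00, dev=0x14, fn=0x0, giving the 16-bit
 * requestor id
 *
 *	devid = (0x00 << 8) | (0x14 << 3) | 0x0 = 0x00a0
 *
 * i.e. PCI device 00:14.0. parse_ivrs_hpet() below encodes its devid
 * the same way.
 */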
2231
2232static int __init parse_ivrs_hpet(char *str)
2233{
2234	unsigned int bus, dev, fn;
2235	int ret, id, i;
2236	u16 devid;
2237
2238	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2239
2240	if (ret != 4) {
2241		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
2242		return 1;
2243	}
2244
2245	if (early_hpet_map_size == EARLY_MAP_SIZE) {
2246		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
2247			str);
2248		return 1;
2249	}
2250
2251	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2252
2253	cmdline_maps			= true;
2254	i				= early_hpet_map_size++;
2255	early_hpet_map[i].id		= id;
2256	early_hpet_map[i].devid		= devid;
2257	early_hpet_map[i].cmd_line	= true;
2258
2259	return 1;
2260}
2261
2262__setup("amd_iommu_dump",	parse_amd_iommu_dump);
2263__setup("amd_iommu=",		parse_amd_iommu_options);
2264__setup("ivrs_ioapic",		parse_ivrs_ioapic);
2265__setup("ivrs_hpet",		parse_ivrs_hpet);
2266
2267IOMMU_INIT_FINISH(amd_iommu_detect,
2268		  gart_iommu_hole_init,
2269		  NULL,
2270		  NULL);
2271
2272bool amd_iommu_v2_supported(void)
2273{
2274	return amd_iommu_v2_present;
2275}
2276EXPORT_SYMBOL(amd_iommu_v2_supported);
2277
2278/****************************************************************************
2279 *
2280 * IOMMU EFR Performance Counter support functionality. This code allows
2281 * access to the IOMMU PC functionality.
2282 *
2283 ****************************************************************************/
2284
2285u8 amd_iommu_pc_get_max_banks(u16 devid)
2286{
2287	struct amd_iommu *iommu;
2288	u8 ret = 0;
2289
2290	/* locate the iommu governing the devid */
2291	iommu = amd_iommu_rlookup_table[devid];
2292	if (iommu)
2293		ret = iommu->max_banks;
2294
2295	return ret;
2296}
2297EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
2298
2299bool amd_iommu_pc_supported(void)
2300{
2301	return amd_iommu_pc_present;
2302}
2303EXPORT_SYMBOL(amd_iommu_pc_supported);
2304
2305u8 amd_iommu_pc_get_max_counters(u16 devid)
2306{
2307	struct amd_iommu *iommu;
2308	u8 ret = 0;
2309
2310	/* locate the iommu governing the devid */
2311	iommu = amd_iommu_rlookup_table[devid];
2312	if (iommu)
2313		ret = iommu->max_counters;
2314
2315	return ret;
2316}
2317EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
2318
2319static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
2320				    u8 bank, u8 cntr, u8 fxn,
2321				    u64 *value, bool is_write)
2322{
2323	u32 offset;
2324	u32 max_offset_lim;
2325
2326	/* Check for valid iommu and pc register indexing */
2327	if (WARN_ON((fxn > 0x28) || (fxn & 7)))
2328		return -ENODEV;
2329
2330	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
2331
2332	/* Limit the offset to the hw defined mmio region aperture */
2333	max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
2334				(iommu->max_counters << 8) | 0x28);
2335	if ((offset < MMIO_CNTR_REG_OFFSET) ||
2336	    (offset > max_offset_lim))
2337		return -EINVAL;
2338
2339	if (is_write) {
2340		writel((u32)*value, iommu->mmio_base + offset);
2341		writel((*value >> 32), iommu->mmio_base + offset + 4);
2342	} else {
2343		*value = readl(iommu->mmio_base + offset + 4);
2344		*value <<= 32;
2345		*value |= readl(iommu->mmio_base + offset); /* OR, don't clobber the high half */
2346	}
2347
2348	return 0;
2349}
2350EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
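/*
 * Worked example for the offset encoding above (illustrative values):
 * bank=1, cntr=2, fxn=0x18 gives
 *
 *	offset = ((0x40 | 1) << 12) | (2 << 8) | 0x18 = 0x41218
 *
 * which must fall between MMIO_CNTR_REG_OFFSET and the aperture limit
 * computed from max_banks and max_counters.
 */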
2351
2352int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
2353				    u64 *value, bool is_write)
2354{
2355	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
2356
2357	/* Make sure the IOMMU PC resource is available */
2358	if (!amd_iommu_pc_present || iommu == NULL)
2359		return -ENODEV;
2360
2361	return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
2362					value, is_write);
2363}
v4.17
   1/*
   2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
   3 * Author: Joerg Roedel <jroedel@suse.de>
   4 *         Leo Duran <leo.duran@amd.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  18 */
  19
  20#include <linux/pci.h>
  21#include <linux/acpi.h>
  22#include <linux/list.h>
  23#include <linux/bitmap.h>
  24#include <linux/slab.h>
  25#include <linux/syscore_ops.h>
  26#include <linux/interrupt.h>
  27#include <linux/msi.h>
  28#include <linux/amd-iommu.h>
  29#include <linux/export.h>
  30#include <linux/iommu.h>
  31#include <linux/kmemleak.h>
  32#include <linux/mem_encrypt.h>
  33#include <asm/pci-direct.h>
  34#include <asm/iommu.h>
  35#include <asm/gart.h>
  36#include <asm/x86_init.h>
  37#include <asm/iommu_table.h>
  38#include <asm/io_apic.h>
  39#include <asm/irq_remapping.h>
  40
  41#include <linux/crash_dump.h>
  42#include "amd_iommu_proto.h"
  43#include "amd_iommu_types.h"
  44#include "irq_remapping.h"
  45
  46/*
  47 * definitions for the ACPI scanning code
  48 */
  49#define IVRS_HEADER_LENGTH 48
  50
  51#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
  52#define ACPI_IVMD_TYPE_ALL              0x20
  53#define ACPI_IVMD_TYPE                  0x21
  54#define ACPI_IVMD_TYPE_RANGE            0x22
  55
  56#define IVHD_DEV_ALL                    0x01
  57#define IVHD_DEV_SELECT                 0x02
  58#define IVHD_DEV_SELECT_RANGE_START     0x03
  59#define IVHD_DEV_RANGE_END              0x04
  60#define IVHD_DEV_ALIAS                  0x42
  61#define IVHD_DEV_ALIAS_RANGE            0x43
  62#define IVHD_DEV_EXT_SELECT             0x46
  63#define IVHD_DEV_EXT_SELECT_RANGE       0x47
  64#define IVHD_DEV_SPECIAL		0x48
  65#define IVHD_DEV_ACPI_HID		0xf0
  66
  67#define UID_NOT_PRESENT                 0
  68#define UID_IS_INTEGER                  1
  69#define UID_IS_CHARACTER                2
  70
  71#define IVHD_SPECIAL_IOAPIC		1
  72#define IVHD_SPECIAL_HPET		2
  73
  74#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
  75#define IVHD_FLAG_PASSPW_EN_MASK        0x02
  76#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
  77#define IVHD_FLAG_ISOC_EN_MASK          0x08
  78
  79#define IVMD_FLAG_EXCL_RANGE            0x08
  80#define IVMD_FLAG_UNITY_MAP             0x01
  81
  82#define ACPI_DEVFLAG_INITPASS           0x01
  83#define ACPI_DEVFLAG_EXTINT             0x02
  84#define ACPI_DEVFLAG_NMI                0x04
  85#define ACPI_DEVFLAG_SYSMGT1            0x10
  86#define ACPI_DEVFLAG_SYSMGT2            0x20
  87#define ACPI_DEVFLAG_LINT0              0x40
  88#define ACPI_DEVFLAG_LINT1              0x80
  89#define ACPI_DEVFLAG_ATSDIS             0x10000000
  90
  91#define LOOP_TIMEOUT	100000
  92/*
  93 * ACPI table definitions
  94 *
  95 * These data structures are laid over the table to parse the important values
  96 * out of it.
  97 */
  98
  99extern const struct iommu_ops amd_iommu_ops;
 100
 101/*
 102 * structure describing one IOMMU in the ACPI table. Typically followed by one
 103 * or more ivhd_entrys.
 104 */
 105struct ivhd_header {
 106	u8 type;
 107	u8 flags;
 108	u16 length;
 109	u16 devid;
 110	u16 cap_ptr;
 111	u64 mmio_phys;
 112	u16 pci_seg;
 113	u16 info;
 114	u32 efr_attr;
 115
 116	/* Following only valid on IVHD type 11h and 40h */
 117	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
 118	u64 res;
 119} __attribute__((packed));
 120
 121/*
 122 * A device entry describing which devices a specific IOMMU translates and
 123 * which requestor ids they use.
 124 */
 125struct ivhd_entry {
 126	u8 type;
 127	u16 devid;
 128	u8 flags;
 129	u32 ext;
 130	u32 hidh;
 131	u64 cid;
 132	u8 uidf;
 133	u8 uidl;
 134	u8 uid;
 135} __attribute__((packed));
 136
 137/*
 138 * An AMD IOMMU memory definition structure. It defines things like exclusion
 139 * ranges for devices and regions that should be unity mapped.
 140 */
 141struct ivmd_header {
 142	u8 type;
 143	u8 flags;
 144	u16 length;
 145	u16 devid;
 146	u16 aux;
 147	u64 resv;
 148	u64 range_start;
 149	u64 range_length;
 150} __attribute__((packed));
 151
 152bool amd_iommu_dump;
 153bool amd_iommu_irq_remap __read_mostly;
 154
 155int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
 156
 157static bool amd_iommu_detected;
 158static bool __initdata amd_iommu_disabled;
 159static int amd_iommu_target_ivhd_type;
 160
 161u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 162					   to handle */
 163LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 164					   we find in ACPI */
 165bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 166
 167LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
 168					   system */
 169
  170/* Array to assign indices to IOMMUs */
 171struct amd_iommu *amd_iommus[MAX_IOMMUS];
 172
 173/* Number of IOMMUs present in the system */
 174static int amd_iommus_present;
 175
 176/* IOMMUs have a non-present cache? */
 177bool amd_iommu_np_cache __read_mostly;
 178bool amd_iommu_iotlb_sup __read_mostly = true;
 179
 180u32 amd_iommu_max_pasid __read_mostly = ~0;
 181
 182bool amd_iommu_v2_present __read_mostly;
 183static bool amd_iommu_pc_present __read_mostly;
 184
 185bool amd_iommu_force_isolation __read_mostly;
 186
 187/*
 188 * List of protection domains - used during resume
 189 */
 190LIST_HEAD(amd_iommu_pd_list);
 191spinlock_t amd_iommu_pd_lock;
 192
 193/*
 194 * Pointer to the device table which is shared by all AMD IOMMUs
 195 * it is indexed by the PCI device id or the HT unit id and contains
 196 * information about the domain the device belongs to as well as the
 197 * page table root pointer.
 198 */
 199struct dev_table_entry *amd_iommu_dev_table;
 200/*
  201 * Pointer to a device table to which the content of the old device
  202 * table will be copied. It is only used in the kdump kernel.
 203 */
 204static struct dev_table_entry *old_dev_tbl_cpy;
 205
 206/*
 207 * The alias table is a driver specific data structure which contains the
 208 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 209 * More than one device can share the same requestor id.
 210 */
 211u16 *amd_iommu_alias_table;
 212
 213/*
 214 * The rlookup table is used to find the IOMMU which is responsible
 215 * for a specific device. It is also indexed by the PCI device id.
 216 */
 217struct amd_iommu **amd_iommu_rlookup_table;
 218EXPORT_SYMBOL(amd_iommu_rlookup_table);
 219
 220/*
 221 * This table is used to find the irq remapping table for a given device id
 222 * quickly.
 223 */
 224struct irq_remap_table **irq_lookup_table;
 225
 226/*
 227 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 228 * to know which ones are already in use.
 229 */
 230unsigned long *amd_iommu_pd_alloc_bitmap;
 231
 232static u32 dev_table_size;	/* size of the device table */
 233static u32 alias_table_size;	/* size of the alias table */
  234static u32 rlookup_table_size;	/* size of the rlookup table */
 235
 236enum iommu_init_state {
 237	IOMMU_START_STATE,
 238	IOMMU_IVRS_DETECTED,
 239	IOMMU_ACPI_FINISHED,
 240	IOMMU_ENABLED,
 241	IOMMU_PCI_INIT,
 242	IOMMU_INTERRUPTS_EN,
 243	IOMMU_DMA_OPS,
 244	IOMMU_INITIALIZED,
 245	IOMMU_NOT_FOUND,
 246	IOMMU_INIT_ERROR,
 247	IOMMU_CMDLINE_DISABLED,
 248};
 249
 250/* Early ioapic and hpet maps from kernel command line */
 251#define EARLY_MAP_SIZE		4
 252static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
 253static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
 254static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
 255
 256static int __initdata early_ioapic_map_size;
 257static int __initdata early_hpet_map_size;
 258static int __initdata early_acpihid_map_size;
 259
 260static bool __initdata cmdline_maps;
 261
 262static enum iommu_init_state init_state = IOMMU_START_STATE;
 263
 264static int amd_iommu_enable_interrupts(void);
 265static int __init iommu_go_to_state(enum iommu_init_state state);
 266static void init_device_table_dma(void);
 267
 268static bool amd_iommu_pre_enabled = true;
 269
 270bool translation_pre_enabled(struct amd_iommu *iommu)
 271{
 272	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
 273}
 274EXPORT_SYMBOL(translation_pre_enabled);
 275
 276static void clear_translation_pre_enabled(struct amd_iommu *iommu)
 277{
 278	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
 279}
 280
 281static void init_translation_status(struct amd_iommu *iommu)
 282{
 283	u32 ctrl;
 284
 285	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 286	if (ctrl & (1<<CONTROL_IOMMU_EN))
 287		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
 288}
 289
 290static inline void update_last_devid(u16 devid)
 291{
 292	if (devid > amd_iommu_last_bdf)
 293		amd_iommu_last_bdf = devid;
 294}
 295
 296static inline unsigned long tbl_size(int entry_size)
 297{
 298	unsigned shift = PAGE_SHIFT +
 299			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
 300
 301	return 1UL << shift;
 302}
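/*
 * Worked example (illustrative, assuming 4K pages and the 32-byte
 * device table entries): with amd_iommu_last_bdf == 0xffff,
 * (0xffff + 1) * 32 = 2MB, get_order(2MB) == 9, so tbl_size()
 * returns 1UL << (12 + 9) = 2MB.
 */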
 303
 304int amd_iommu_get_num_iommus(void)
 305{
 306	return amd_iommus_present;
 307}
 308
 309/* Access to l1 and l2 indexed register spaces */
 310
 311static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
 312{
 313	u32 val;
 314
 315	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
 316	pci_read_config_dword(iommu->dev, 0xfc, &val);
 317	return val;
 318}
 319
 320static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
 321{
 322	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
 323	pci_write_config_dword(iommu->dev, 0xfc, val);
 324	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
 325}
 326
 327static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
 328{
 329	u32 val;
 330
 331	pci_write_config_dword(iommu->dev, 0xf0, address);
 332	pci_read_config_dword(iommu->dev, 0xf4, &val);
 333	return val;
 334}
 335
 336static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
 337{
 338	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
 339	pci_write_config_dword(iommu->dev, 0xf4, val);
 340}
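/*
 * The four helpers above follow the usual indirect-register pattern: an
 * address register (0xf8 for L1, 0xf0 for L2) selects the target, a data
 * register (0xfc/0xf4) carries the value, and a write-enable bit in the
 * address (bit 31 for L1, bit 8 for L2) turns the access into a write.
 * A typical read-modify-write, as done by the ATS workaround further
 * down, looks like:
 *
 *	u32 val = iommu_read_l2(iommu, 0x47);
 *	iommu_write_l2(iommu, 0x47, val | BIT(0));
 */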
 341
 342/****************************************************************************
 343 *
 344 * AMD IOMMU MMIO register space handling functions
 345 *
 346 * These functions are used to program the IOMMU device registers in
 347 * MMIO space required for that driver.
 348 *
 349 ****************************************************************************/
 350
 351/*
  352 * This function sets the exclusion range in the IOMMU. DMA accesses to the
  353 * exclusion range are passed through untranslated.
 354 */
 355static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 356{
 357	u64 start = iommu->exclusion_start & PAGE_MASK;
 358	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
 359	u64 entry;
 360
 361	if (!iommu->exclusion_start)
 362		return;
 363
 364	entry = start | MMIO_EXCL_ENABLE_MASK;
 365	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
 366			&entry, sizeof(entry));
 367
 368	entry = limit;
 369	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
 370			&entry, sizeof(entry));
 371}
 372
 373/* Programs the physical address of the device table into the IOMMU hardware */
 374static void iommu_set_device_table(struct amd_iommu *iommu)
 375{
 376	u64 entry;
 377
 378	BUG_ON(iommu->mmio_base == NULL);
 379
 380	entry = iommu_virt_to_phys(amd_iommu_dev_table);
 381	entry |= (dev_table_size >> 12) - 1;
 382	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
 383			&entry, sizeof(entry));
 384}
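/*
 * The low bits of the base address encode the table size in 4K pages
 * minus one; e.g. a 2MB device table (illustrative) is encoded as
 * (0x200000 >> 12) - 1 = 511.
 */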
 385
 386/* Generic functions to enable/disable certain features of the IOMMU. */
 387static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 388{
 389	u32 ctrl;
 390
 391	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 392	ctrl |= (1 << bit);
 393	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 394}
 395
 396static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 397{
 398	u32 ctrl;
 399
 400	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 401	ctrl &= ~(1 << bit);
 402	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 403}
 404
 405static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
 406{
 407	u32 ctrl;
 408
 409	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 410	ctrl &= ~CTRL_INV_TO_MASK;
 411	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
 412	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 413}
 414
 415/* Function to enable the hardware */
 416static void iommu_enable(struct amd_iommu *iommu)
 417{
 418	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 419}
 420
 421static void iommu_disable(struct amd_iommu *iommu)
 422{
 423	/* Disable command buffer */
 424	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
 425
 426	/* Disable event logging and event interrupts */
 427	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
 428	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
 429
 430	/* Disable IOMMU GA_LOG */
 431	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
 432	iommu_feature_disable(iommu, CONTROL_GAINT_EN);
 433
 434	/* Disable IOMMU hardware itself */
 435	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
 436}
 437
 438/*
 439 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 440 * the system has one.
 441 */
 442static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
 443{
 444	if (!request_mem_region(address, end, "amd_iommu")) {
 445		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
 446			address, end);
 447		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
 448		return NULL;
 449	}
 450
 451	return (u8 __iomem *)ioremap_nocache(address, end);
 452}
 453
 454static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
 455{
 456	if (iommu->mmio_base)
 457		iounmap(iommu->mmio_base);
 458	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
 459}
 460
 461static inline u32 get_ivhd_header_size(struct ivhd_header *h)
 462{
 463	u32 size = 0;
 464
 465	switch (h->type) {
 466	case 0x10:
 467		size = 24;
 468		break;
 469	case 0x11:
 470	case 0x40:
 471		size = 40;
 472		break;
 473	}
 474	return size;
 475}
 476
 477/****************************************************************************
 478 *
 479 * The functions below belong to the first pass of AMD IOMMU ACPI table
 480 * parsing. In this pass we try to find out the highest device id this
 481 * code has to handle. Upon this information the size of the shared data
 482 * structures is determined later.
 483 *
 484 ****************************************************************************/
 485
 486/*
 487 * This function calculates the length of a given IVHD entry
 488 */
 489static inline int ivhd_entry_length(u8 *ivhd)
 490{
 491	u32 type = ((struct ivhd_entry *)ivhd)->type;
 492
 493	if (type < 0x80) {
 494		return 0x04 << (*ivhd >> 6);
 495	} else if (type == IVHD_DEV_ACPI_HID) {
 496		/* For ACPI_HID, offset 21 is uid len */
 497		return *((u8 *)ivhd + 21) + 22;
 498	}
 499	return 0;
 500}
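/*
 * Examples: IVHD_DEV_SELECT (0x02) yields 0x04 << 0 = 4 bytes, while
 * IVHD_DEV_ALIAS (0x42) and IVHD_DEV_SPECIAL (0x48) yield 0x04 << 1 = 8
 * bytes, since bits 7:6 of the type field select the fixed entry length.
 */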
 501
 502/*
 503 * After reading the highest device id from the IOMMU PCI capability header
  504 * this function checks whether a higher device id is defined in the ACPI table
 505 */
 506static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
 507{
 508	u8 *p = (void *)h, *end = (void *)h;
 509	struct ivhd_entry *dev;
 510
 511	u32 ivhd_size = get_ivhd_header_size(h);
 512
 513	if (!ivhd_size) {
 514		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
 515		return -EINVAL;
 516	}
 517
 518	p += ivhd_size;
 519	end += h->length;
 520
 521	while (p < end) {
 522		dev = (struct ivhd_entry *)p;
 523		switch (dev->type) {
 524		case IVHD_DEV_ALL:
 525			/* Use maximum BDF value for DEV_ALL */
 526			update_last_devid(0xffff);
 527			break;
 528		case IVHD_DEV_SELECT:
 529		case IVHD_DEV_RANGE_END:
 530		case IVHD_DEV_ALIAS:
 531		case IVHD_DEV_EXT_SELECT:
 532			/* all the above subfield types refer to device ids */
 533			update_last_devid(dev->devid);
 534			break;
 535		default:
 536			break;
 537		}
 538		p += ivhd_entry_length(p);
 539	}
 540
 541	WARN_ON(p != end);
 542
 543	return 0;
 544}
 545
 546static int __init check_ivrs_checksum(struct acpi_table_header *table)
 547{
 548	int i;
 549	u8 checksum = 0, *p = (u8 *)table;
 550
 551	for (i = 0; i < table->length; ++i)
 552		checksum += p[i];
 553	if (checksum != 0) {
 554		/* ACPI table corrupt */
 555		pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
 556		return -ENODEV;
 557	}
 558
 559	return 0;
 560}
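/*
 * ACPI requires the byte sum of the whole table, including the checksum
 * byte itself, to be zero modulo 256; any non-zero sum therefore means
 * the table is corrupt.
 */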
 561
 562/*
 563 * Iterate over all IVHD entries in the ACPI table and find the highest device
  564 * id which we need to handle. This is the first of three functions which
  565 * parse the ACPI table.
 566 */
 567static int __init find_last_devid_acpi(struct acpi_table_header *table)
 568{
 569	u8 *p = (u8 *)table, *end = (u8 *)table;
 570	struct ivhd_header *h;
 571
 572	p += IVRS_HEADER_LENGTH;
 573
 574	end += table->length;
 575	while (p < end) {
 576		h = (struct ivhd_header *)p;
 577		if (h->type == amd_iommu_target_ivhd_type) {
 578			int ret = find_last_devid_from_ivhd(h);
 579
 580			if (ret)
 581				return ret;
 582		}
 583		p += h->length;
 584	}
 585	WARN_ON(p != end);
 586
 587	return 0;
 588}
 589
 590/****************************************************************************
 591 *
 592 * The following functions belong to the code path which parses the ACPI table
 593 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 594 * data structures, initialize the device/alias/rlookup table and also
 595 * basically initialize the hardware.
 596 *
 597 ****************************************************************************/
 598
 599/*
 600 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 601 * write commands to that buffer later and the IOMMU will execute them
 602 * asynchronously
 603 */
 604static int __init alloc_command_buffer(struct amd_iommu *iommu)
 605{
 606	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 607						  get_order(CMD_BUFFER_SIZE));
 608
 609	return iommu->cmd_buf ? 0 : -ENOMEM;
 610}
 611
 612/*
 613 * This function resets the command buffer if the IOMMU stopped fetching
 614 * commands from it.
 615 */
 616void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
 617{
 618	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
 619
 620	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 621	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 622	iommu->cmd_buf_head = 0;
 623	iommu->cmd_buf_tail = 0;
 624
 625	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
 626}
 627
 628/*
 629 * This function writes the command buffer address to the hardware and
 630 * enables it.
 631 */
 632static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 633{
 634	u64 entry;
 635
 636	BUG_ON(iommu->cmd_buf == NULL);
 637
 638	entry = iommu_virt_to_phys(iommu->cmd_buf);
 639	entry |= MMIO_CMD_SIZE_512;
 640
 641	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 642		    &entry, sizeof(entry));
 643
 644	amd_iommu_reset_cmd_buffer(iommu);
 645}
 646
 647/*
 648 * This function disables the command buffer
 649 */
 650static void iommu_disable_command_buffer(struct amd_iommu *iommu)
 651{
 652	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
 653}
 654
 655static void __init free_command_buffer(struct amd_iommu *iommu)
 656{
 657	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 658}
 659
 660/* allocates the memory where the IOMMU will log its events to */
 661static int __init alloc_event_buffer(struct amd_iommu *iommu)
 662{
 663	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 664						  get_order(EVT_BUFFER_SIZE));
 665
 666	return iommu->evt_buf ? 0 : -ENOMEM;
 667}
 668
 669static void iommu_enable_event_buffer(struct amd_iommu *iommu)
 670{
 671	u64 entry;
 672
 673	BUG_ON(iommu->evt_buf == NULL);
 674
 675	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
 676
 677	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
 678		    &entry, sizeof(entry));
 679
 680	/* set head and tail to zero manually */
 681	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 682	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 683
 684	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
 685}
 686
 687/*
 688 * This function disables the event log buffer
 689 */
 690static void iommu_disable_event_buffer(struct amd_iommu *iommu)
 691{
 692	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
 693}
 694
 695static void __init free_event_buffer(struct amd_iommu *iommu)
 696{
 697	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 698}
 699
  700/* allocates the memory where the IOMMU will log peripheral page requests to */
 701static int __init alloc_ppr_log(struct amd_iommu *iommu)
 702{
 703	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 704						  get_order(PPR_LOG_SIZE));
 705
 706	return iommu->ppr_log ? 0 : -ENOMEM;
 707}
 708
 709static void iommu_enable_ppr_log(struct amd_iommu *iommu)
 710{
 711	u64 entry;
 712
 713	if (iommu->ppr_log == NULL)
 714		return;
 715
 716	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
 717
 718	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
 719		    &entry, sizeof(entry));
 720
 721	/* set head and tail to zero manually */
 722	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 723	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 724
 725	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
 726	iommu_feature_enable(iommu, CONTROL_PPR_EN);
 727}
 728
 729static void __init free_ppr_log(struct amd_iommu *iommu)
 730{
 731	if (iommu->ppr_log == NULL)
 732		return;
 733
 734	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
 735}
 736
 737static void free_ga_log(struct amd_iommu *iommu)
 738{
 739#ifdef CONFIG_IRQ_REMAP
 740	if (iommu->ga_log)
 741		free_pages((unsigned long)iommu->ga_log,
 742			    get_order(GA_LOG_SIZE));
 743	if (iommu->ga_log_tail)
 744		free_pages((unsigned long)iommu->ga_log_tail,
 745			    get_order(8));
 746#endif
 747}
 748
 749static int iommu_ga_log_enable(struct amd_iommu *iommu)
 750{
 751#ifdef CONFIG_IRQ_REMAP
 752	u32 status, i;
 753
 754	if (!iommu->ga_log)
 755		return -EINVAL;
 756
 757	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 758
 759	/* Check if already running */
 760	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
 761		return 0;
 762
 763	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
 764	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
 765
 766	for (i = 0; i < LOOP_TIMEOUT; ++i) {
 767		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 768		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
 769			break;
 770	}
 771
 772	if (i >= LOOP_TIMEOUT)
 773		return -EINVAL;
 774#endif /* CONFIG_IRQ_REMAP */
 775	return 0;
 776}
 777
 778#ifdef CONFIG_IRQ_REMAP
 779static int iommu_init_ga_log(struct amd_iommu *iommu)
 780{
 781	u64 entry;
 782
 783	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 784		return 0;
 785
 786	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 787					get_order(GA_LOG_SIZE));
 788	if (!iommu->ga_log)
 789		goto err_out;
 790
 791	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 792					get_order(8));
 793	if (!iommu->ga_log_tail)
 794		goto err_out;
 795
 796	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
 797	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
 798		    &entry, sizeof(entry));
 799	entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
 800	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
 801		    &entry, sizeof(entry));
 802	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
 803	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
 804
 805	return 0;
 806err_out:
 807	free_ga_log(iommu);
 808	return -EINVAL;
 809}
 810#endif /* CONFIG_IRQ_REMAP */
 811
 812static int iommu_init_ga(struct amd_iommu *iommu)
 813{
 814	int ret = 0;
 815
 816#ifdef CONFIG_IRQ_REMAP
 817	/* Note: We have already checked GASup from IVRS table.
 818	 *       Now, we need to make sure that GAMSup is set.
 819	 */
 820	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
 821	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
 822		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
 823
 824	ret = iommu_init_ga_log(iommu);
 825#endif /* CONFIG_IRQ_REMAP */
 826
 827	return ret;
 828}
 829
 830static void iommu_enable_gt(struct amd_iommu *iommu)
 831{
 832	if (!iommu_feature(iommu, FEATURE_GT))
 833		return;
 834
 835	iommu_feature_enable(iommu, CONTROL_GT_EN);
 836}
 837
 838/* sets a specific bit in the device table entry. */
 839static void set_dev_entry_bit(u16 devid, u8 bit)
 840{
 841	int i = (bit >> 6) & 0x03;
 842	int _bit = bit & 0x3f;
 843
 844	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
 845}
 846
 847static int get_dev_entry_bit(u16 devid, u8 bit)
 848{
 849	int i = (bit >> 6) & 0x03;
 850	int _bit = bit & 0x3f;
 851
 852	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
 853}
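/*
 * The flat bit index used above selects the u64 word of the entry via
 * bits 7:6 and the bit within that word via bits 5:0. For example
 * (illustrative), bit 96: i = (96 >> 6) & 0x03 = 1 and _bit = 96 & 0x3f
 * = 32, i.e. bit 32 of data[1].
 */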
 854
 855
 856static bool copy_device_table(void)
 857{
 858	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
 859	struct dev_table_entry *old_devtb = NULL;
 860	u32 lo, hi, devid, old_devtb_size;
 861	phys_addr_t old_devtb_phys;
 862	struct amd_iommu *iommu;
 863	u16 dom_id, dte_v, irq_v;
 864	gfp_t gfp_flag;
 865	u64 tmp;
 866
 867	if (!amd_iommu_pre_enabled)
 868		return false;
 869
 870	pr_warn("Translation is already enabled - trying to copy translation structures\n");
 871	for_each_iommu(iommu) {
 872		/* All IOMMUs should use the same device table with the same size */
 873		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
 874		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
 875		entry = (((u64) hi) << 32) + lo;
 876		if (last_entry && last_entry != entry) {
 877			pr_err("IOMMU:%d should use the same dev table as others!\n",
 878				iommu->index);
 879			return false;
 880		}
 881		last_entry = entry;
 882
 883		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
 884		if (old_devtb_size != dev_table_size) {
 885			pr_err("The device table size of IOMMU:%d is not expected!\n",
 886				iommu->index);
 887			return false;
 888		}
 889	}
 890
 891	old_devtb_phys = entry & PAGE_MASK;
 892	if (old_devtb_phys >= 0x100000000ULL) {
 893		pr_err("The address of old device table is above 4G, not trustworthy!\n");
 894		return false;
 895	}
 896	old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
 897	if (!old_devtb)
 898		return false;
 899
 900	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
 901	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
 902				get_order(dev_table_size));
 903	if (old_dev_tbl_cpy == NULL) {
 904		pr_err("Failed to allocate memory for copying old device table!\n");
 905		return false;
 906	}
 907
 908	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
 909		old_dev_tbl_cpy[devid] = old_devtb[devid];
 910		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
 911		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
 912
 913		if (dte_v && dom_id) {
 914			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
 915			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
 916			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
 917			/* If gcr3 table existed, mask it out */
 918			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
 919				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
 920				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
 921				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
 922				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
 923				tmp |= DTE_FLAG_GV;
 924				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
 925			}
 926		}
 927
 928		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
 929		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
 930		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
 931		if (irq_v && (int_ctl || int_tab_len)) {
 932			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
 933			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
 934				pr_err("Wrong old irq remapping flag: %#x\n", devid);
 935				return false;
 936			}
 937
 938		        old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
 939		}
 940	}
 941	memunmap(old_devtb);
 942
 943	return true;
 944}
 945
 946void amd_iommu_apply_erratum_63(u16 devid)
 947{
 948	int sysmgt;
 949
 950	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
 951		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
 952
 953	if (sysmgt == 0x01)
 954		set_dev_entry_bit(devid, DEV_ENTRY_IW);
 955}
 956
 957/* Writes the specific IOMMU for a device into the rlookup table */
 958static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
 959{
 960	amd_iommu_rlookup_table[devid] = iommu;
 961}
 962
 963/*
 964 * This function takes the device specific flags read from the ACPI
 965 * table and sets up the device table entry with that information
 966 */
 967static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
 968					   u16 devid, u32 flags, u32 ext_flags)
 969{
 970	if (flags & ACPI_DEVFLAG_INITPASS)
 971		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
 972	if (flags & ACPI_DEVFLAG_EXTINT)
 973		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
 974	if (flags & ACPI_DEVFLAG_NMI)
 975		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
 976	if (flags & ACPI_DEVFLAG_SYSMGT1)
 977		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
 978	if (flags & ACPI_DEVFLAG_SYSMGT2)
 979		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
 980	if (flags & ACPI_DEVFLAG_LINT0)
 981		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
 982	if (flags & ACPI_DEVFLAG_LINT1)
 983		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
 984
 985	amd_iommu_apply_erratum_63(devid);
 986
 987	set_iommu_for_device(iommu, devid);
 988}
 989
 990static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
 991{
 992	struct devid_map *entry;
 993	struct list_head *list;
 994
 995	if (type == IVHD_SPECIAL_IOAPIC)
 996		list = &ioapic_map;
 997	else if (type == IVHD_SPECIAL_HPET)
 998		list = &hpet_map;
 999	else
1000		return -EINVAL;
1001
1002	list_for_each_entry(entry, list, list) {
1003		if (!(entry->id == id && entry->cmd_line))
1004			continue;
1005
1006		pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
1007			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1008
1009		*devid = entry->devid;
1010
1011		return 0;
1012	}
1013
1014	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1015	if (!entry)
1016		return -ENOMEM;
1017
1018	entry->id	= id;
1019	entry->devid	= *devid;
1020	entry->cmd_line	= cmd_line;
1021
1022	list_add_tail(&entry->list, list);
1023
1024	return 0;
1025}
1026
1027static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
1028				      bool cmd_line)
1029{
1030	struct acpihid_map_entry *entry;
1031	struct list_head *list = &acpihid_map;
1032
1033	list_for_each_entry(entry, list, list) {
1034		if (strcmp(entry->hid, hid) ||
1035		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1036		    !entry->cmd_line)
1037			continue;
1038
1039		pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
1040			hid, uid);
1041		*devid = entry->devid;
1042		return 0;
1043	}
1044
1045	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1046	if (!entry)
1047		return -ENOMEM;
1048
1049	memcpy(entry->uid, uid, strlen(uid));
1050	memcpy(entry->hid, hid, strlen(hid));
1051	entry->devid = *devid;
1052	entry->cmd_line	= cmd_line;
1053	entry->root_devid = (entry->devid & (~0x7));
1054
1055	pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
1056		entry->cmd_line ? "cmd" : "ivrs",
1057		entry->hid, entry->uid, entry->root_devid);
1058
1059	list_add_tail(&entry->list, list);
1060	return 0;
1061}
1062
1063static int __init add_early_maps(void)
1064{
1065	int i, ret;
1066
1067	for (i = 0; i < early_ioapic_map_size; ++i) {
1068		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1069					 early_ioapic_map[i].id,
1070					 &early_ioapic_map[i].devid,
1071					 early_ioapic_map[i].cmd_line);
1072		if (ret)
1073			return ret;
1074	}
1075
1076	for (i = 0; i < early_hpet_map_size; ++i) {
1077		ret = add_special_device(IVHD_SPECIAL_HPET,
1078					 early_hpet_map[i].id,
1079					 &early_hpet_map[i].devid,
1080					 early_hpet_map[i].cmd_line);
1081		if (ret)
1082			return ret;
1083	}
1084
1085	for (i = 0; i < early_acpihid_map_size; ++i) {
1086		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1087					  early_acpihid_map[i].uid,
1088					  &early_acpihid_map[i].devid,
1089					  early_acpihid_map[i].cmd_line);
1090		if (ret)
1091			return ret;
1092	}
1093
1094	return 0;
1095}
1096
1097/*
1098 * Reads the device exclusion range from ACPI and initializes the IOMMU with
1099 * it
1100 */
1101static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
1102{
1103	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1104
1105	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
1106		return;
1107
1108	if (iommu) {
1109		/*
 1110		 * We can only configure exclusion ranges per IOMMU, not
1111		 * per device. But we can enable the exclusion range per
1112		 * device. This is done here
1113		 */
1114		set_dev_entry_bit(devid, DEV_ENTRY_EX);
1115		iommu->exclusion_start = m->range_start;
1116		iommu->exclusion_length = m->range_length;
1117	}
1118}
1119
1120/*
1121 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1122 * initializes the hardware and our data structures with it.
1123 */
1124static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1125					struct ivhd_header *h)
1126{
1127	u8 *p = (u8 *)h;
1128	u8 *end = p, flags = 0;
1129	u16 devid = 0, devid_start = 0, devid_to = 0;
1130	u32 dev_i, ext_flags = 0;
1131	bool alias = false;
1132	struct ivhd_entry *e;
1133	u32 ivhd_size;
1134	int ret;
1135
1136
1137	ret = add_early_maps();
1138	if (ret)
1139		return ret;
1140
1141	/*
1142	 * First save the recommended feature enable bits from ACPI
1143	 */
1144	iommu->acpi_flags = h->flags;
1145
1146	/*
1147	 * Done. Now parse the device entries
1148	 */
1149	ivhd_size = get_ivhd_header_size(h);
1150	if (!ivhd_size) {
1151		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
1152		return -EINVAL;
1153	}
1154
1155	p += ivhd_size;
1156
1157	end += h->length;
1158
1159
1160	while (p < end) {
1161		e = (struct ivhd_entry *)p;
1162		switch (e->type) {
1163		case IVHD_DEV_ALL:
1164
1165			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);
1166
1167			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
1168				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1169			break;
1170		case IVHD_DEV_SELECT:
1171
1172			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1173				    "flags: %02x\n",
1174				    PCI_BUS_NUM(e->devid),
1175				    PCI_SLOT(e->devid),
1176				    PCI_FUNC(e->devid),
1177				    e->flags);
1178
1179			devid = e->devid;
1180			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1181			break;
1182		case IVHD_DEV_SELECT_RANGE_START:
1183
1184			DUMP_printk("  DEV_SELECT_RANGE_START\t "
1185				    "devid: %02x:%02x.%x flags: %02x\n",
1186				    PCI_BUS_NUM(e->devid),
1187				    PCI_SLOT(e->devid),
1188				    PCI_FUNC(e->devid),
1189				    e->flags);
1190
1191			devid_start = e->devid;
1192			flags = e->flags;
1193			ext_flags = 0;
1194			alias = false;
1195			break;
1196		case IVHD_DEV_ALIAS:
1197
1198			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1199				    "flags: %02x devid_to: %02x:%02x.%x\n",
1200				    PCI_BUS_NUM(e->devid),
1201				    PCI_SLOT(e->devid),
1202				    PCI_FUNC(e->devid),
1203				    e->flags,
1204				    PCI_BUS_NUM(e->ext >> 8),
1205				    PCI_SLOT(e->ext >> 8),
1206				    PCI_FUNC(e->ext >> 8));
1207
1208			devid = e->devid;
1209			devid_to = e->ext >> 8;
1210			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
1211			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1212			amd_iommu_alias_table[devid] = devid_to;
1213			break;
1214		case IVHD_DEV_ALIAS_RANGE:
1215
1216			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
1217				    "devid: %02x:%02x.%x flags: %02x "
1218				    "devid_to: %02x:%02x.%x\n",
1219				    PCI_BUS_NUM(e->devid),
1220				    PCI_SLOT(e->devid),
1221				    PCI_FUNC(e->devid),
1222				    e->flags,
1223				    PCI_BUS_NUM(e->ext >> 8),
1224				    PCI_SLOT(e->ext >> 8),
1225				    PCI_FUNC(e->ext >> 8));
1226
1227			devid_start = e->devid;
1228			flags = e->flags;
1229			devid_to = e->ext >> 8;
1230			ext_flags = 0;
1231			alias = true;
1232			break;
1233		case IVHD_DEV_EXT_SELECT:
1234
1235			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1236				    "flags: %02x ext: %08x\n",
1237				    PCI_BUS_NUM(e->devid),
1238				    PCI_SLOT(e->devid),
1239				    PCI_FUNC(e->devid),
1240				    e->flags, e->ext);
1241
1242			devid = e->devid;
1243			set_dev_entry_from_acpi(iommu, devid, e->flags,
1244						e->ext);
1245			break;
1246		case IVHD_DEV_EXT_SELECT_RANGE:
1247
1248			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
1249				    "%02x:%02x.%x flags: %02x ext: %08x\n",
1250				    PCI_BUS_NUM(e->devid),
1251				    PCI_SLOT(e->devid),
1252				    PCI_FUNC(e->devid),
1253				    e->flags, e->ext);
1254
1255			devid_start = e->devid;
1256			flags = e->flags;
1257			ext_flags = e->ext;
1258			alias = false;
1259			break;
1260		case IVHD_DEV_RANGE_END:
1261
1262			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
1263				    PCI_BUS_NUM(e->devid),
1264				    PCI_SLOT(e->devid),
1265				    PCI_FUNC(e->devid));
1266
1267			devid = e->devid;
1268			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1269				if (alias) {
1270					amd_iommu_alias_table[dev_i] = devid_to;
1271					set_dev_entry_from_acpi(iommu,
1272						devid_to, flags, ext_flags);
1273				}
1274				set_dev_entry_from_acpi(iommu, dev_i,
1275							flags, ext_flags);
1276			}
1277			break;
1278		case IVHD_DEV_SPECIAL: {
1279			u8 handle, type;
1280			const char *var;
1281			u16 devid;
1282			int ret;
1283
1284			handle = e->ext & 0xff;
1285			devid  = (e->ext >>  8) & 0xffff;
1286			type   = (e->ext >> 24) & 0xff;
1287
1288			if (type == IVHD_SPECIAL_IOAPIC)
1289				var = "IOAPIC";
1290			else if (type == IVHD_SPECIAL_HPET)
1291				var = "HPET";
1292			else
1293				var = "UNKNOWN";
1294
1295			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
1296				    var, (int)handle,
1297				    PCI_BUS_NUM(devid),
1298				    PCI_SLOT(devid),
1299				    PCI_FUNC(devid));
1300
1301			ret = add_special_device(type, handle, &devid, false);
1302			if (ret)
1303				return ret;
1304
1305			/*
1306			 * add_special_device might update the devid in case a
1307			 * command-line override is present. So call
1308			 * set_dev_entry_from_acpi after add_special_device.
1309			 */
1310			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1311
1312			break;
1313		}
1314		case IVHD_DEV_ACPI_HID: {
1315			u16 devid;
1316			u8 hid[ACPIHID_HID_LEN] = {0};
1317			u8 uid[ACPIHID_UID_LEN] = {0};
1318			int ret;
1319
1320			if (h->type != 0x40) {
1321				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1322				       e->type);
1323				break;
1324			}
1325
1326			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
1327			hid[ACPIHID_HID_LEN - 1] = '\0';
1328
1329			if (!(*hid)) {
1330				pr_err(FW_BUG "Invalid HID.\n");
1331				break;
1332			}
1333
1334			switch (e->uidf) {
1335			case UID_NOT_PRESENT:
1336
1337				if (e->uidl != 0)
1338					pr_warn(FW_BUG "Invalid UID length.\n");
1339
1340				break;
1341			case UID_IS_INTEGER:
1342
1343				sprintf(uid, "%d", e->uid);
1344
1345				break;
1346			case UID_IS_CHARACTER:
1347
1348				memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
1349				uid[ACPIHID_UID_LEN - 1] = '\0';
1350
1351				break;
1352			default:
1353				break;
1354			}
1355
1356			devid = e->devid;
1357			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1358				    hid, uid,
1359				    PCI_BUS_NUM(devid),
1360				    PCI_SLOT(devid),
1361				    PCI_FUNC(devid));
1362
1363			flags = e->flags;
1364
1365			ret = add_acpi_hid_device(hid, uid, &devid, false);
1366			if (ret)
1367				return ret;
1368
1369			/*
 1370			 * add_acpi_hid_device might update the devid in case a
 1371			 * command-line override is present. So call
 1372			 * set_dev_entry_from_acpi after add_acpi_hid_device.
1373			 */
1374			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1375
1376			break;
1377		}
1378		default:
1379			break;
1380		}
1381
1382		p += ivhd_entry_length(p);
1383	}
1384
1385	return 0;
1386}
1387
1388static void __init free_iommu_one(struct amd_iommu *iommu)
1389{
1390	free_command_buffer(iommu);
1391	free_event_buffer(iommu);
1392	free_ppr_log(iommu);
1393	free_ga_log(iommu);
1394	iommu_unmap_mmio_space(iommu);
1395}
1396
1397static void __init free_iommu_all(void)
1398{
1399	struct amd_iommu *iommu, *next;
1400
1401	for_each_iommu_safe(iommu, next) {
1402		list_del(&iommu->list);
1403		free_iommu_one(iommu);
1404		kfree(iommu);
1405	}
1406}
1407
1408/*
1409 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1410 * Workaround:
 1411 *     BIOS should disable L2B miscellaneous clock gating by setting
1412 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1413 */
1414static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1415{
1416	u32 value;
1417
1418	if ((boot_cpu_data.x86 != 0x15) ||
1419	    (boot_cpu_data.x86_model < 0x10) ||
1420	    (boot_cpu_data.x86_model > 0x1f))
1421		return;
1422
1423	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1424	pci_read_config_dword(iommu->dev, 0xf4, &value);
1425
1426	if (value & BIT(2))
1427		return;
1428
1429	/* Select NB indirect register 0x90 and enable writing */
1430	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1431
1432	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1433	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
1434		dev_name(&iommu->dev->dev));
1435
1436	/* Clear the enable writing bit */
1437	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1438}
1439
1440/*
1441 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1442 * Workaround:
1443 *     BIOS should enable ATS write permission check by setting
1444 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1445 */
1446static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1447{
1448	u32 value;
1449
1450	if ((boot_cpu_data.x86 != 0x15) ||
1451	    (boot_cpu_data.x86_model < 0x30) ||
1452	    (boot_cpu_data.x86_model > 0x3f))
1453		return;
1454
1455	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1456	value = iommu_read_l2(iommu, 0x47);
1457
1458	if (value & BIT(0))
1459		return;
1460
1461	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1462	iommu_write_l2(iommu, 0x47, value | BIT(0));
1463
1464	pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
1465		dev_name(&iommu->dev->dev));
1466}
1467
1468/*
 1469 * This function glues the initialization function for one IOMMU
1470 * together and also allocates the command buffer and programs the
1471 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1472 */
1473static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1474{
1475	int ret;
1476
1477	raw_spin_lock_init(&iommu->lock);
1478
1479	/* Add IOMMU to internal data structures */
1480	list_add_tail(&iommu->list, &amd_iommu_list);
1481	iommu->index = amd_iommus_present++;
1482
1483	if (unlikely(iommu->index >= MAX_IOMMUS)) {
1484		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
1485		return -ENOSYS;
1486	}
1487
1488	/* Index is fine - add IOMMU to the array */
1489	amd_iommus[iommu->index] = iommu;
1490
1491	/*
1492	 * Copy data from ACPI table entry to the iommu struct
1493	 */
1494	iommu->devid   = h->devid;
1495	iommu->cap_ptr = h->cap_ptr;
1496	iommu->pci_seg = h->pci_seg;
1497	iommu->mmio_phys = h->mmio_phys;
1498
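	/*
	 * Type 0x10 IVHDs only carry an EFR summary in efr_attr, while
	 * types 0x11/0x40 embed a copy of the EFR register in efr_reg;
	 * both are used to size the MMIO region and to probe GA support.
	 */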
1499	switch (h->type) {
1500	case 0x10:
1501		/* Check if IVHD EFR contains proper max banks/counters */
1502		if ((h->efr_attr != 0) &&
1503		    ((h->efr_attr & (0xF << 13)) != 0) &&
1504		    ((h->efr_attr & (0x3F << 17)) != 0))
1505			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1506		else
1507			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1508		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1509			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1510		break;
1511	case 0x11:
1512	case 0x40:
1513		if (h->efr_reg & (1 << 9))
1514			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1515		else
1516			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1517		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
1518			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1519		break;
1520	default:
1521		return -EINVAL;
1522	}
1523
1524	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1525						iommu->mmio_phys_end);
1526	if (!iommu->mmio_base)
1527		return -ENOMEM;
1528
1529	if (alloc_command_buffer(iommu))
1530		return -ENOMEM;
1531
1532	if (alloc_event_buffer(iommu))
1533		return -ENOMEM;
1534
1535	iommu->int_enabled = false;
1536
1537	init_translation_status(iommu);
1538	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1539		iommu_disable(iommu);
1540		clear_translation_pre_enabled(iommu);
1541		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1542			iommu->index);
1543	}
1544	if (amd_iommu_pre_enabled)
1545		amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1546
1547	ret = init_iommu_from_acpi(iommu, h);
1548	if (ret)
1549		return ret;
1550
1551	ret = amd_iommu_create_irq_domain(iommu);
1552	if (ret)
1553		return ret;
1554
1555	/*
1556	 * Make sure IOMMU is not considered to translate itself. The IVRS
1557	 * table tells us so, but this is a lie!
1558	 */
1559	amd_iommu_rlookup_table[iommu->devid] = NULL;
1560
1561	return 0;
1562}
1563
1564/**
1565 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1566 * @ivrs: Pointer to the IVRS header
1567 *
1568 * This function searches through all IVDBs and returns the maximum supported IVHD type
1569 */
1570static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1571{
1572	u8 *base = (u8 *)ivrs;
1573	struct ivhd_header *ivhd = (struct ivhd_header *)
1574					(base + IVRS_HEADER_LENGTH);
1575	u8 last_type = ivhd->type;
1576	u16 devid = ivhd->devid;
1577
1578	while (((u8 *)ivhd - base < ivrs->length) &&
1579	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1580		u8 *p = (u8 *) ivhd;
1581
1582		if (ivhd->devid == devid)
1583			last_type = ivhd->type;
1584		ivhd = (struct ivhd_header *)(p + ivhd->length);
1585	}
1586
1587	return last_type;
1588}
1589
1590/*
1591 * Iterates over all IOMMU entries in the ACPI table, allocates the
1592 * IOMMU structure and initializes it with init_iommu_one()
1593 */
1594static int __init init_iommu_all(struct acpi_table_header *table)
1595{
1596	u8 *p = (u8 *)table, *end = (u8 *)table;
1597	struct ivhd_header *h;
1598	struct amd_iommu *iommu;
1599	int ret;
1600
1601	end += table->length;
1602	p += IVRS_HEADER_LENGTH;
1603
1604	while (p < end) {
1605		h = (struct ivhd_header *)p;
1606		if (*p == amd_iommu_target_ivhd_type) {
1607
1608			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
1609				    "seg: %d flags: %01x info %04x\n",
1610				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
1611				    PCI_FUNC(h->devid), h->cap_ptr,
1612				    h->pci_seg, h->flags, h->info);
1613			DUMP_printk("       mmio-addr: %016llx\n",
1614				    h->mmio_phys);
1615
1616			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1617			if (iommu == NULL)
1618				return -ENOMEM;
1619
1620			ret = init_iommu_one(iommu, h);
1621			if (ret)
1622				return ret;
1623		}
1624		p += h->length;
1625
1626	}
1627	WARN_ON(p != end);
1628
1629	return 0;
1630}
1631
1632static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1633				u8 fxn, u64 *value, bool is_write);
1634
1635static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1636{
1637	u64 val = 0xabcd, val2 = 0;
1638
1639	if (!iommu_feature(iommu, FEATURE_PC))
1640		return;
1641
1642	amd_iommu_pc_present = true;
1643
1644	/* Check if the performance counters can be written to */
1645	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1646	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
1647	    (val != val2)) {
1648		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
1649		amd_iommu_pc_present = false;
1650		return;
1651	}
1652
1653	pr_info("AMD-Vi: IOMMU performance counters supported\n");
1654
1655	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1656	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1657	iommu->max_counters = (u8) ((val >> 7) & 0xf);
1658}
1659
1660static ssize_t amd_iommu_show_cap(struct device *dev,
1661				  struct device_attribute *attr,
1662				  char *buf)
1663{
1664	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1665	return sprintf(buf, "%x\n", iommu->cap);
1666}
1667static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1668
1669static ssize_t amd_iommu_show_features(struct device *dev,
1670				       struct device_attribute *attr,
1671				       char *buf)
1672{
1673	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1674	return sprintf(buf, "%llx\n", iommu->features);
1675}
1676static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1677
1678static struct attribute *amd_iommu_attrs[] = {
1679	&dev_attr_cap.attr,
1680	&dev_attr_features.attr,
1681	NULL,
1682};
1683
1684static struct attribute_group amd_iommu_group = {
1685	.name = "amd-iommu",
1686	.attrs = amd_iommu_attrs,
1687};
1688
1689static const struct attribute_group *amd_iommu_groups[] = {
1690	&amd_iommu_group,
1691	NULL,
1692};
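
/*
 * Once iommu_init_pci() below has registered the device under the
 * "ivhd%d" name, these attributes are readable from userspace, e.g.
 * (paths assuming the first IOMMU):
 *
 *   $ cat /sys/class/iommu/ivhd0/amd-iommu/cap
 *   $ cat /sys/class/iommu/ivhd0/amd-iommu/features
 */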
1693
1694static int iommu_init_pci(struct amd_iommu *iommu)
1695{
1696	int cap_ptr = iommu->cap_ptr;
1697	u32 range, misc, low, high;
1698	int ret;
1699
1700	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1701						 iommu->devid & 0xff);
1702	if (!iommu->dev)
1703		return -ENODEV;
1704
1705	/* Prevent binding other PCI device drivers to IOMMU devices */
1706	iommu->dev->match_driver = false;
1707
1708	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1709			      &iommu->cap);
1710	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
1711			      &range);
1712	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
1713			      &misc);
1714
1715	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1716		amd_iommu_iotlb_sup = false;
1717
1718	/* read extended feature bits */
1719	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
1720	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
1721
1722	iommu->features = ((u64)high << 32) | low;
1723
1724	if (iommu_feature(iommu, FEATURE_GT)) {
1725		int glxval;
1726		u32 max_pasid;
1727		u64 pasmax;
1728
1729		pasmax = iommu->features & FEATURE_PASID_MASK;
1730		pasmax >>= FEATURE_PASID_SHIFT;
1731		max_pasid  = (1 << (pasmax + 1)) - 1;
1732
1733		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1734
1735		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
1736
1737		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
1738		glxval >>= FEATURE_GLXVAL_SHIFT;
1739
1740		if (amd_iommu_max_glx_val == -1)
1741			amd_iommu_max_glx_val = glxval;
1742		else
1743			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1744	}
1745
1746	if (iommu_feature(iommu, FEATURE_GT) &&
1747	    iommu_feature(iommu, FEATURE_PPR)) {
1748		iommu->is_iommu_v2   = true;
1749		amd_iommu_v2_present = true;
1750	}
1751
1752	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1753		return -ENOMEM;
1754
1755	ret = iommu_init_ga(iommu);
1756	if (ret)
1757		return ret;
1758
1759	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1760		amd_iommu_np_cache = true;
1761
1762	init_iommu_perf_ctr(iommu);
1763
1764	if (is_rd890_iommu(iommu->dev)) {
1765		int i, j;
1766
1767		iommu->root_pdev =
1768			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1769						    PCI_DEVFN(0, 0));
1770
1771		/*
1772		 * Some rd890 systems may not be fully reconfigured by the
1773		 * BIOS, so it's necessary for us to store this information so
1774		 * it can be reprogrammed on resume
1775		 */
1776		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1777				&iommu->stored_addr_lo);
1778		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1779				&iommu->stored_addr_hi);
1780
1781		/* Low bit locks writes to configuration space */
1782		iommu->stored_addr_lo &= ~1;
1783
1784		for (i = 0; i < 6; i++)
1785			for (j = 0; j < 0x12; j++)
1786				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1787
1788		for (i = 0; i < 0x83; i++)
1789			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1790	}
1791
1792	amd_iommu_erratum_746_workaround(iommu);
1793	amd_iommu_ats_write_check_workaround(iommu);
1794
1795	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1796			       amd_iommu_groups, "ivhd%d", iommu->index);
1797	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1798	iommu_device_register(&iommu->iommu);
1799
1800	return pci_enable_device(iommu->dev);
1801}
1802
1803static void print_iommu_info(void)
1804{
1805	static const char * const feat_str[] = {
1806		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1807		"IA", "GA", "HE", "PC"
1808	};
1809	struct amd_iommu *iommu;
1810
1811	for_each_iommu(iommu) {
1812		int i;
1813
1814		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
1815			dev_name(&iommu->dev->dev), iommu->cap_ptr);
1816
1817		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1818			pr_info("AMD-Vi: Extended features (%#llx):\n",
1819				iommu->features);
1820			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
1821				if (iommu_feature(iommu, (1ULL << i)))
1822					pr_cont(" %s", feat_str[i]);
1823			}
1824
1825			if (iommu->features & FEATURE_GAM_VAPIC)
1826				pr_cont(" GA_vAPIC");
1827
1828			pr_cont("\n");
1829		}
1830	}
1831	if (irq_remapping_enabled) {
1832		pr_info("AMD-Vi: Interrupt remapping enabled\n");
1833		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
1834			pr_info("AMD-Vi: virtual APIC enabled\n");
1835	}
1836}
1837
1838static int __init amd_iommu_init_pci(void)
1839{
1840	struct amd_iommu *iommu;
1841	int ret = 0;
1842
1843	for_each_iommu(iommu) {
1844		ret = iommu_init_pci(iommu);
1845		if (ret)
1846			break;
1847	}
1848
1849	/*
1850	 * Order is important here to make sure any unity map requirements are
1851	 * fulfilled. The unity mappings are created and written to the device
1852	 * table during the amd_iommu_init_api() call.
1853	 *
1854	 * After that we call init_device_table_dma() to make sure any
1855	 * uninitialized DTE will block DMA, and in the end we flush the caches
1856	 * of all IOMMUs to make sure the changes to the device table are
1857	 * active.
1858	 */
1859	ret = amd_iommu_init_api();
1860
1861	init_device_table_dma();
1862
1863	for_each_iommu(iommu)
1864		iommu_flush_all_caches(iommu);
1865
1866	if (!ret)
1867		print_iommu_info();
1868
1869	return ret;
1870}
1871
1872/****************************************************************************
1873 *
1874 * The following functions initialize the MSI interrupts for all IOMMUs
1875 * in the system. It's a bit challenging because there could be multiple
1876 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1877 * pci_dev.
1878 *
1879 ****************************************************************************/
1880
1881static int iommu_setup_msi(struct amd_iommu *iommu)
1882{
1883	int r;
1884
1885	r = pci_enable_msi(iommu->dev);
1886	if (r)
1887		return r;
1888
1889	r = request_threaded_irq(iommu->dev->irq,
1890				 amd_iommu_int_handler,
1891				 amd_iommu_int_thread,
1892				 0, "AMD-Vi",
1893				 iommu);
1894
1895	if (r) {
1896		pci_disable_msi(iommu->dev);
1897		return r;
1898	}
1899
1900	iommu->int_enabled = true;
1901
1902	return 0;
1903}
1904
1905static int iommu_init_msi(struct amd_iommu *iommu)
1906{
1907	int ret;
1908
1909	if (iommu->int_enabled)
1910		goto enable_faults;
1911
1912	if (iommu->dev->msi_cap)
1913		ret = iommu_setup_msi(iommu);
1914	else
1915		ret = -ENODEV;
1916
1917	if (ret)
1918		return ret;
1919
1920enable_faults:
1921	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
1922
1923	if (iommu->ppr_log != NULL)
1924		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
1925
1926	iommu_ga_log_enable(iommu);
1927
1928	return 0;
1929}
1930
1931/****************************************************************************
1932 *
1933 * The next functions belong to the last pass of parsing the ACPI
1934 * table. In this pass the memory mapping requirements are
1935 * gathered (like exclusion and unity mapping ranges).
1936 *
1937 ****************************************************************************/
1938
1939static void __init free_unity_maps(void)
1940{
1941	struct unity_map_entry *entry, *next;
1942
1943	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
1944		list_del(&entry->list);
1945		kfree(entry);
1946	}
1947}
1948
1949/* called when we find an exclusion range definition in ACPI */
1950static int __init init_exclusion_range(struct ivmd_header *m)
1951{
1952	int i;
1953
1954	switch (m->type) {
1955	case ACPI_IVMD_TYPE:
1956		set_device_exclusion_range(m->devid, m);
1957		break;
1958	case ACPI_IVMD_TYPE_ALL:
1959		for (i = 0; i <= amd_iommu_last_bdf; ++i)
1960			set_device_exclusion_range(i, m);
1961		break;
1962	case ACPI_IVMD_TYPE_RANGE:
1963		for (i = m->devid; i <= m->aux; ++i)
1964			set_device_exclusion_range(i, m);
1965		break;
1966	default:
1967		break;
1968	}
1969
1970	return 0;
1971}
1972
1973/* called for unity map ACPI definition */
1974static int __init init_unity_map_range(struct ivmd_header *m)
1975{
1976	struct unity_map_entry *e = NULL;
1977	char *s;
1978
1979	e = kzalloc(sizeof(*e), GFP_KERNEL);
1980	if (e == NULL)
1981		return -ENOMEM;
1982
1983	switch (m->type) {
1984	default:
1985		kfree(e);
1986		return 0;
1987	case ACPI_IVMD_TYPE:
1988		s = "IVMD_TYPE\t\t\t";
1989		e->devid_start = e->devid_end = m->devid;
1990		break;
1991	case ACPI_IVMD_TYPE_ALL:
1992		s = "IVMD_TYPE_ALL\t\t";
1993		e->devid_start = 0;
1994		e->devid_end = amd_iommu_last_bdf;
1995		break;
1996	case ACPI_IVMD_TYPE_RANGE:
1997		s = "IVMD_TYPE_RANGE\t\t";
1998		e->devid_start = m->devid;
1999		e->devid_end = m->aux;
2000		break;
2001	}
2002	e->address_start = PAGE_ALIGN(m->range_start);
2003	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
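	/* Bit 0 of the IVMD flags is the unity bit; bits 2:1 carry the IR/IW permissions */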
2004	e->prot = m->flags >> 1;
2005
2006	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2007		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
2008		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2009		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
2010		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2011		    e->address_start, e->address_end, m->flags);
2012
2013	list_add_tail(&e->list, &amd_iommu_unity_map);
2014
2015	return 0;
2016}
2017
2018/* iterates over all memory definitions we find in the ACPI table */
2019static int __init init_memory_definitions(struct acpi_table_header *table)
2020{
2021	u8 *p = (u8 *)table, *end = (u8 *)table;
2022	struct ivmd_header *m;
2023
2024	end += table->length;
2025	p += IVRS_HEADER_LENGTH;
2026
2027	while (p < end) {
2028		m = (struct ivmd_header *)p;
2029		if (m->flags & IVMD_FLAG_EXCL_RANGE)
2030			init_exclusion_range(m);
2031		else if (m->flags & IVMD_FLAG_UNITY_MAP)
2032			init_unity_map_range(m);
2033
2034		p += m->length;
2035	}
2036
2037	return 0;
2038}
2039
2040/*
2041 * Init the device table to not allow DMA access for devices
2042 */
2043static void init_device_table_dma(void)
2044{
2045	u32 devid;
2046
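	/*
	 * A DTE that is valid, with translation enabled but no permission
	 * bits or page table attached, makes the IOMMU block all DMA from
	 * that device.
	 */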
2047	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2048		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2049		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
2050	}
2051}
2052
2053static void __init uninit_device_table_dma(void)
2054{
2055	u32 devid;
2056
2057	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2058		amd_iommu_dev_table[devid].data[0] = 0ULL;
2059		amd_iommu_dev_table[devid].data[1] = 0ULL;
2060	}
2061}
2062
2063static void init_device_table(void)
2064{
2065	u32 devid;
2066
2067	if (!amd_iommu_irq_remap)
2068		return;
2069
2070	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2071		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2072}
2073
2074static void iommu_init_flags(struct amd_iommu *iommu)
2075{
2076	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2077		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2078		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2079
2080	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2081		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2082		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2083
2084	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2085		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2086		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2087
2088	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2089		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2090		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2091
2092	/*
2093	 * make IOMMU memory accesses cache coherent
2094	 */
2095	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2096
2097	/* Set IOTLB invalidation timeout to 1s */
2098	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2099}
2100
2101static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2102{
2103	int i, j;
2104	u32 ioc_feature_control;
2105	struct pci_dev *pdev = iommu->root_pdev;
2106
2107	/* RD890 BIOSes may not have completely reconfigured the iommu */
2108	if (!is_rd890_iommu(iommu->dev) || !pdev)
2109		return;
2110
2111	/*
2112	 * First, we need to ensure that the iommu is enabled. This is
2113	 * controlled by a register in the northbridge
2114	 */
2115
2116	/* Select Northbridge indirect register 0x75 and enable writing */
2117	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2118	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2119
2120	/* Enable the iommu */
2121	if (!(ioc_feature_control & 0x1))
2122		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2123
2124	/* Restore the iommu BAR */
2125	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2126			       iommu->stored_addr_lo);
2127	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2128			       iommu->stored_addr_hi);
2129
2130	/* Restore the l1 indirect regs for each of the 6 l1s */
2131	for (i = 0; i < 6; i++)
2132		for (j = 0; j < 0x12; j++)
2133			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2134
2135	/* Restore the l2 indirect regs */
2136	for (i = 0; i < 0x83; i++)
2137		iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2138
2139	/* Lock PCI setup registers */
2140	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2141			       iommu->stored_addr_lo | 1);
2142}
2143
2144static void iommu_enable_ga(struct amd_iommu *iommu)
2145{
2146#ifdef CONFIG_IRQ_REMAP
2147	switch (amd_iommu_guest_ir) {
2148	case AMD_IOMMU_GUEST_IR_VAPIC:
2149		iommu_feature_enable(iommu, CONTROL_GAM_EN);
2150		/* Fall through */
2151	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2152		iommu_feature_enable(iommu, CONTROL_GA_EN);
2153		iommu->irte_ops = &irte_128_ops;
2154		break;
2155	default:
2156		iommu->irte_ops = &irte_32_ops;
2157		break;
2158	}
2159#endif
2160}
2161
2162static void early_enable_iommu(struct amd_iommu *iommu)
2163{
2164	iommu_disable(iommu);
2165	iommu_init_flags(iommu);
2166	iommu_set_device_table(iommu);
2167	iommu_enable_command_buffer(iommu);
2168	iommu_enable_event_buffer(iommu);
2169	iommu_set_exclusion_range(iommu);
2170	iommu_enable_ga(iommu);
2171	iommu_enable(iommu);
2172	iommu_flush_all_caches(iommu);
2173}
2174
2175/*
2176 * This function finally enables all IOMMUs found in the system after
2177 * they have been initialized.
2178 *
2179 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to
2180 * copy the old content of the device table entries. If that is not the
2181 * case, or the copy failed, just continue as a normal kernel would.
2182 */
2183static void early_enable_iommus(void)
2184{
2185	struct amd_iommu *iommu;
2186
2187
2188	if (!copy_device_table()) {
2189		/*
2190		 * If we get here because copying the device table from the
2191		 * old kernel failed with all IOMMUs enabled, print an error
2192		 * message and free the allocated old_dev_tbl_cpy.
2193		 */
2194		if (amd_iommu_pre_enabled)
2195			pr_err("Failed to copy DEV table from previous kernel.\n");
2196		if (old_dev_tbl_cpy != NULL)
2197			free_pages((unsigned long)old_dev_tbl_cpy,
2198					get_order(dev_table_size));
2199
2200		for_each_iommu(iommu) {
2201			clear_translation_pre_enabled(iommu);
2202			early_enable_iommu(iommu);
2203		}
2204	} else {
2205		pr_info("Copied DEV table from previous kernel.\n");
2206		free_pages((unsigned long)amd_iommu_dev_table,
2207				get_order(dev_table_size));
2208		amd_iommu_dev_table = old_dev_tbl_cpy;
2209		for_each_iommu(iommu) {
2210			iommu_disable_command_buffer(iommu);
2211			iommu_disable_event_buffer(iommu);
2212			iommu_enable_command_buffer(iommu);
2213			iommu_enable_event_buffer(iommu);
2214			iommu_enable_ga(iommu);
2215			iommu_set_device_table(iommu);
2216			iommu_flush_all_caches(iommu);
2217		}
2218	}
2219
2220#ifdef CONFIG_IRQ_REMAP
2221	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2222		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2223#endif
2224}
2225
2226static void enable_iommus_v2(void)
2227{
2228	struct amd_iommu *iommu;
2229
2230	for_each_iommu(iommu) {
2231		iommu_enable_ppr_log(iommu);
2232		iommu_enable_gt(iommu);
2233	}
2234}
2235
2236static void enable_iommus(void)
2237{
2238	early_enable_iommus();
2239
2240	enable_iommus_v2();
2241}
2242
2243static void disable_iommus(void)
2244{
2245	struct amd_iommu *iommu;
2246
2247	for_each_iommu(iommu)
2248		iommu_disable(iommu);
2249
2250#ifdef CONFIG_IRQ_REMAP
2251	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2252		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2253#endif
2254}
2255
2256/*
2257 * Suspend/Resume support
2258 * disable suspend until real resume implemented
2259 */
2260
2261static void amd_iommu_resume(void)
2262{
2263	struct amd_iommu *iommu;
2264
2265	for_each_iommu(iommu)
2266		iommu_apply_resume_quirks(iommu);
2267
2268	/* re-load the hardware */
2269	enable_iommus();
2270
2271	amd_iommu_enable_interrupts();
2272}
2273
2274static int amd_iommu_suspend(void)
2275{
2276	/* disable IOMMUs to go out of the way for BIOS */
2277	disable_iommus();
2278
2279	return 0;
2280}
2281
2282static struct syscore_ops amd_iommu_syscore_ops = {
2283	.suspend = amd_iommu_suspend,
2284	.resume = amd_iommu_resume,
2285};
2286
2287static void __init free_iommu_resources(void)
2288{
2289	kmemleak_free(irq_lookup_table);
2290	free_pages((unsigned long)irq_lookup_table,
2291		   get_order(rlookup_table_size));
2292	irq_lookup_table = NULL;
2293
2294	kmem_cache_destroy(amd_iommu_irq_cache);
2295	amd_iommu_irq_cache = NULL;
2296
2297	free_pages((unsigned long)amd_iommu_rlookup_table,
2298		   get_order(rlookup_table_size));
2299	amd_iommu_rlookup_table = NULL;
2300
2301	free_pages((unsigned long)amd_iommu_alias_table,
2302		   get_order(alias_table_size));
2303	amd_iommu_alias_table = NULL;
2304
2305	free_pages((unsigned long)amd_iommu_dev_table,
2306		   get_order(dev_table_size));
2307	amd_iommu_dev_table = NULL;
2308
2309	free_iommu_all();
2310
2311#ifdef CONFIG_GART_IOMMU
2312	/*
2313	 * We failed to initialize the AMD IOMMU - try fallback to GART
2314	 * if possible.
2315	 */
2316	gart_iommu_init();
2317
2318#endif
2319}
2320
2321/* SB IOAPIC is always on this device in AMD systems */
2322#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
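/* i.e. bus 0, device 0x14, function 0: PCI address 00:14.0 */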
2323
2324static bool __init check_ioapic_information(void)
2325{
2326	const char *fw_bug = FW_BUG;
2327	bool ret, has_sb_ioapic;
2328	int idx;
2329
2330	has_sb_ioapic = false;
2331	ret           = false;
2332
2333	/*
2334	 * If we have map overrides on the kernel command line the
2335	 * messages in this function might not describe firmware bugs
2336	 * anymore - so be careful
2337	 */
2338	if (cmdline_maps)
2339		fw_bug = "";
2340
2341	for (idx = 0; idx < nr_ioapics; idx++) {
2342		int devid, id = mpc_ioapic_id(idx);
2343
2344		devid = get_ioapic_devid(id);
2345		if (devid < 0) {
2346			pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
2347				fw_bug, id);
2348			ret = false;
2349		} else if (devid == IOAPIC_SB_DEVID) {
2350			has_sb_ioapic = true;
2351			ret           = true;
2352		}
2353	}
2354
2355	if (!has_sb_ioapic) {
2356		/*
2357		 * We expect the SB IOAPIC to be listed in the IVRS
2358		 * table. The system timer is connected to the SB IOAPIC
2359		 * and if we don't have it in the list the system will
2360		 * panic at boot time.  This situation usually happens
2361		 * when the BIOS is buggy and provides us the wrong
2362		 * device id for the IOAPIC in the system.
2363		 */
2364		pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
2365	}
2366
2367	if (!ret)
2368		pr_err("AMD-Vi: Disabling interrupt remapping\n");
2369
2370	return ret;
2371}
2372
2373static void __init free_dma_resources(void)
2374{
2375	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2376		   get_order(MAX_DOMAIN_ID/8));
2377	amd_iommu_pd_alloc_bitmap = NULL;
2378
2379	free_unity_maps();
2380}
2381
2382/*
2383 * This is the hardware init function for AMD IOMMU in the system.
2384 * This function is called either from amd_iommu_init or from the interrupt
2385 * remapping setup code.
2386 *
2387 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
2388 * four times:
2389 *
2390 *	1 pass) Discover the most comprehensive IVHD type to use.
2391 *
2392 *	2 pass) Find the highest PCI device id the driver has to handle.
2393 *		Based on this information the sizes of the data
2394 *		structures that need to be allocated are determined.
2395 *
2396 *	3 pass) Initialize the data structures just allocated with the
2397 *		information in the ACPI table about available AMD IOMMUs
2398 *		in the system. It also maps the PCI devices in the
2399 *		system to specific IOMMUs
2400 *
2401 *	4 pass) After the basic data structures are allocated and
2402 *		initialized we update them with information about memory
2403 *		remapping requirements parsed out of the ACPI table in
2404 *		this last pass.
2405 *
2406 * After everything is set up the IOMMUs are enabled and the necessary
2407 * hotplug and suspend notifiers are registered.
2408 */
2409static int __init early_amd_iommu_init(void)
2410{
2411	struct acpi_table_header *ivrs_base;
2412	acpi_status status;
2413	int i, remap_cache_sz, ret = 0;
2414
2415	if (!amd_iommu_detected)
2416		return -ENODEV;
2417
2418	status = acpi_get_table("IVRS", 0, &ivrs_base);
2419	if (status == AE_NOT_FOUND)
2420		return -ENODEV;
2421	else if (ACPI_FAILURE(status)) {
2422		const char *err = acpi_format_exception(status);
2423		pr_err("AMD-Vi: IVRS table error: %s\n", err);
2424		return -EINVAL;
2425	}
2426
2427	/*
2428	 * Validate checksum here so we don't need to do it when
2429	 * we actually parse the table
2430	 */
2431	ret = check_ivrs_checksum(ivrs_base);
2432	if (ret)
2433		goto out;
2434
2435	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2436	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2437
2438	/*
2439	 * First parse ACPI tables to find the largest Bus/Dev/Func
2440	 * we need to handle. Based on this information the shared data
2441	 * structures for the IOMMUs in the system will be allocated
2442	 */
2443	ret = find_last_devid_acpi(ivrs_base);
2444	if (ret)
2445		goto out;
2446
2447	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
2448	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2449	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2450
2451	/* Device table - directly used by all IOMMUs */
2452	ret = -ENOMEM;
2453	amd_iommu_dev_table = (void *)__get_free_pages(
2454				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
2455				      get_order(dev_table_size));
2456	if (amd_iommu_dev_table == NULL)
2457		goto out;
2458
2459	/*
2460	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
2461	 * IOMMU sees for that device
2462	 */
2463	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2464			get_order(alias_table_size));
2465	if (amd_iommu_alias_table == NULL)
2466		goto out;
2467
2468	/* IOMMU rlookup table - find the IOMMU for a specific device */
2469	amd_iommu_rlookup_table = (void *)__get_free_pages(
2470			GFP_KERNEL | __GFP_ZERO,
2471			get_order(rlookup_table_size));
2472	if (amd_iommu_rlookup_table == NULL)
2473		goto out;
2474
2475	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2476					    GFP_KERNEL | __GFP_ZERO,
2477					    get_order(MAX_DOMAIN_ID/8));
2478	if (amd_iommu_pd_alloc_bitmap == NULL)
2479		goto out;
2480
2481	/*
2482	 * let all alias entries point to themselves
2483	 */
2484	for (i = 0; i <= amd_iommu_last_bdf; ++i)
2485		amd_iommu_alias_table[i] = i;
2486
2487	/*
2488	 * never allocate domain 0 because it's used as the non-allocated and
2489	 * error value placeholder
2490	 */
2491	__set_bit(0, amd_iommu_pd_alloc_bitmap);
2492
2493	spin_lock_init(&amd_iommu_pd_lock);
2494
2495	/*
2496	 * now the data structures are allocated and basically initialized
2497	 * start the real acpi table scan
2498	 */
2499	ret = init_iommu_all(ivrs_base);
2500	if (ret)
2501		goto out;
2502
2503	/* Disable any previously enabled IOMMUs */
2504	if (!is_kdump_kernel() || amd_iommu_disabled)
2505		disable_iommus();
2506
2507	if (amd_iommu_irq_remap)
2508		amd_iommu_irq_remap = check_ioapic_information();
2509
2510	if (amd_iommu_irq_remap) {
2511		/*
2512		 * Interrupt remapping enabled, create kmem_cache for the
2513		 * remapping tables.
2514		 */
2515		ret = -ENOMEM;
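		/* Legacy IRTEs are 32 bits each; GA-mode IRTEs are 128 bits */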
2516		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2517			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2518		else
2519			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
2520		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
2521							remap_cache_sz,
2522							IRQ_TABLE_ALIGNMENT,
2523							0, NULL);
2524		if (!amd_iommu_irq_cache)
2525			goto out;
2526
2527		irq_lookup_table = (void *)__get_free_pages(
2528				GFP_KERNEL | __GFP_ZERO,
2529				get_order(rlookup_table_size));
2530		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2531			       1, GFP_KERNEL);
2532		if (!irq_lookup_table)
2533			goto out;
2534	}
2535
2536	ret = init_memory_definitions(ivrs_base);
2537	if (ret)
2538		goto out;
2539
2540	/* init the device table */
2541	init_device_table();
2542
2543out:
2544	/* Don't leak any ACPI memory */
2545	acpi_put_table(ivrs_base);
2546	ivrs_base = NULL;
2547
2548	return ret;
2549}
2550
2551static int amd_iommu_enable_interrupts(void)
2552{
2553	struct amd_iommu *iommu;
2554	int ret = 0;
2555
2556	for_each_iommu(iommu) {
2557		ret = iommu_init_msi(iommu);
2558		if (ret)
2559			goto out;
2560	}
2561
2562out:
2563	return ret;
2564}
2565
2566static bool detect_ivrs(void)
2567{
2568	struct acpi_table_header *ivrs_base;
2569	acpi_status status;
2570
2571	status = acpi_get_table("IVRS", 0, &ivrs_base);
2572	if (status == AE_NOT_FOUND)
2573		return false;
2574	else if (ACPI_FAILURE(status)) {
2575		const char *err = acpi_format_exception(status);
2576		pr_err("AMD-Vi: IVRS table error: %s\n", err);
2577		return false;
2578	}
2579
2580	acpi_put_table(ivrs_base);
2581
2582	/* Make sure ACS will be enabled during PCI probe */
2583	pci_request_acs();
2584
2585	return true;
2586}
2587
2588/****************************************************************************
2589 *
2590 * AMD IOMMU Initialization State Machine
2591 *
2592 ****************************************************************************/
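
/*
 * On a normal boot the states advance in this order, with the error
 * states (IOMMU_NOT_FOUND, IOMMU_INIT_ERROR, IOMMU_CMDLINE_DISABLED)
 * branching off on failure:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED
 *     -> IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN
 *     -> IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 */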
2593
2594static int __init state_next(void)
2595{
2596	int ret = 0;
2597
2598	switch (init_state) {
2599	case IOMMU_START_STATE:
2600		if (!detect_ivrs()) {
2601			init_state	= IOMMU_NOT_FOUND;
2602			ret		= -ENODEV;
2603		} else {
2604			init_state	= IOMMU_IVRS_DETECTED;
2605		}
2606		break;
2607	case IOMMU_IVRS_DETECTED:
2608		ret = early_amd_iommu_init();
2609		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2610		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
2611			pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n");
2612			free_dma_resources();
2613			free_iommu_resources();
2614			init_state = IOMMU_CMDLINE_DISABLED;
2615			ret = -EINVAL;
2616		}
2617		break;
2618	case IOMMU_ACPI_FINISHED:
2619		early_enable_iommus();
2620		x86_platform.iommu_shutdown = disable_iommus;
2621		init_state = IOMMU_ENABLED;
2622		break;
2623	case IOMMU_ENABLED:
2624		register_syscore_ops(&amd_iommu_syscore_ops);
2625		ret = amd_iommu_init_pci();
2626		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2627		enable_iommus_v2();
2628		break;
2629	case IOMMU_PCI_INIT:
2630		ret = amd_iommu_enable_interrupts();
2631		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2632		break;
2633	case IOMMU_INTERRUPTS_EN:
2634		ret = amd_iommu_init_dma_ops();
2635		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2636		break;
2637	case IOMMU_DMA_OPS:
2638		init_state = IOMMU_INITIALIZED;
2639		break;
2640	case IOMMU_INITIALIZED:
2641		/* Nothing to do */
2642		break;
2643	case IOMMU_NOT_FOUND:
2644	case IOMMU_INIT_ERROR:
2645	case IOMMU_CMDLINE_DISABLED:
2646		/* Error states => do nothing */
2647		ret = -EINVAL;
2648		break;
2649	default:
2650		/* Unknown state */
2651		BUG();
2652	}
2653
2654	return ret;
2655}
2656
2657static int __init iommu_go_to_state(enum iommu_init_state state)
2658{
2659	int ret = -EINVAL;
2660
2661	while (init_state != state) {
2662		if (init_state == IOMMU_NOT_FOUND         ||
2663		    init_state == IOMMU_INIT_ERROR        ||
2664		    init_state == IOMMU_CMDLINE_DISABLED)
2665			break;
2666		ret = state_next();
2667	}
2668
2669	return ret;
2670}
2671
2672#ifdef CONFIG_IRQ_REMAP
2673int __init amd_iommu_prepare(void)
2674{
2675	int ret;
2676
2677	amd_iommu_irq_remap = true;
2678
2679	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2680	if (ret)
2681		return ret;
2682	return amd_iommu_irq_remap ? 0 : -ENODEV;
2683}
2684
2685int __init amd_iommu_enable(void)
2686{
2687	int ret;
2688
2689	ret = iommu_go_to_state(IOMMU_ENABLED);
2690	if (ret)
2691		return ret;
2692
2693	irq_remapping_enabled = 1;
2694
2695	return 0;
2696}
2697
2698void amd_iommu_disable(void)
2699{
2700	amd_iommu_suspend();
2701}
2702
2703int amd_iommu_reenable(int mode)
2704{
2705	amd_iommu_resume();
2706
2707	return 0;
2708}
2709
2710int __init amd_iommu_enable_faulting(void)
2711{
2712	/* We enable MSI later when PCI is initialized */
2713	return 0;
2714}
2715#endif
2716
2717/*
2718 * This is the core init function for AMD IOMMU hardware in the system.
2719 * This function is called from the generic x86 DMA layer initialization
2720 * code.
2721 */
2722static int __init amd_iommu_init(void)
2723{
2724	int ret;
2725
2726	ret = iommu_go_to_state(IOMMU_INITIALIZED);
2727	if (ret) {
2728		free_dma_resources();
2729		if (!irq_remapping_enabled) {
2730			disable_iommus();
2731			free_iommu_resources();
2732		} else {
2733			struct amd_iommu *iommu;
2734
2735			uninit_device_table_dma();
2736			for_each_iommu(iommu)
2737				iommu_flush_all_caches(iommu);
2738		}
2739	}
2740
2741	return ret;
2742}
2743
2744static bool amd_iommu_sme_check(void)
2745{
2746	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
2747		return true;
2748
2749	/* For Fam17h, a specific level of support is required */
2750	if (boot_cpu_data.microcode >= 0x08001205)
2751		return true;
2752
2753	if ((boot_cpu_data.microcode >= 0x08001126) &&
2754	    (boot_cpu_data.microcode <= 0x080011ff))
2755		return true;
2756
2757	pr_notice("AMD-Vi: IOMMU not currently supported when SME is active\n");
2758
2759	return false;
2760}
2761
2762/****************************************************************************
2763 *
2764 * Early detect code. This code runs at IOMMU detection time in the DMA
2765 * layer. It just checks if there is an IVRS ACPI table to detect AMD
2766 * IOMMUs
2767 *
2768 ****************************************************************************/
2769int __init amd_iommu_detect(void)
2770{
2771	int ret;
2772
2773	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
2774		return -ENODEV;
2775
2776	if (!amd_iommu_sme_check())
2777		return -ENODEV;
2778
2779	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2780	if (ret)
2781		return ret;
2782
2783	amd_iommu_detected = true;
2784	iommu_detected = 1;
2785	x86_init.iommu.iommu_init = amd_iommu_init;
2786
2787	return 1;
2788}
2789
2790/****************************************************************************
2791 *
2792 * Parsing functions for the AMD IOMMU specific kernel command line
2793 * options.
2794 *
2795 ****************************************************************************/
2796
2797static int __init parse_amd_iommu_dump(char *str)
2798{
2799	amd_iommu_dump = true;
2800
2801	return 1;
2802}
2803
2804static int __init parse_amd_iommu_intr(char *str)
2805{
2806	for (; *str; ++str) {
2807		if (strncmp(str, "legacy", 6) == 0) {
2808			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
2809			break;
2810		}
2811		if (strncmp(str, "vapic", 5) == 0) {
2812			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2813			break;
2814		}
2815	}
2816	return 1;
2817}
2818
2819static int __init parse_amd_iommu_options(char *str)
2820{
2821	for (; *str; ++str) {
2822		if (strncmp(str, "fullflush", 9) == 0)
2823			amd_iommu_unmap_flush = true;
2824		if (strncmp(str, "off", 3) == 0)
2825			amd_iommu_disabled = true;
2826		if (strncmp(str, "force_isolation", 15) == 0)
2827			amd_iommu_force_isolation = true;
2828	}
2829
2830	return 1;
2831}
2832
2833static int __init parse_ivrs_ioapic(char *str)
2834{
2835	unsigned int bus, dev, fn;
2836	int ret, id, i;
2837	u16 devid;
2838
2839	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2840
2841	if (ret != 4) {
2842		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
2843		return 1;
2844	}
2845
2846	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
2847		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
2848			str);
2849		return 1;
2850	}
2851
2852	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2853
2854	cmdline_maps			= true;
2855	i				= early_ioapic_map_size++;
2856	early_ioapic_map[i].id		= id;
2857	early_ioapic_map[i].devid	= devid;
2858	early_ioapic_map[i].cmd_line	= true;
2859
2860	return 1;
2861}
2862
2863static int __init parse_ivrs_hpet(char *str)
2864{
2865	unsigned int bus, dev, fn;
2866	int ret, id, i;
2867	u16 devid;
2868
2869	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2870
2871	if (ret != 4) {
2872		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
2873		return 1;
2874	}
2875
2876	if (early_hpet_map_size == EARLY_MAP_SIZE) {
2877		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
2878			str);
2879		return 1;
2880	}
2881
2882	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2883
2884	cmdline_maps			= true;
2885	i				= early_hpet_map_size++;
2886	early_hpet_map[i].id		= id;
2887	early_hpet_map[i].devid		= devid;
2888	early_hpet_map[i].cmd_line	= true;
2889
2890	return 1;
2891}
2892
2893static int __init parse_ivrs_acpihid(char *str)
2894{
2895	u32 bus, dev, fn;
2896	char *hid, *uid, *p;
2897	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
2898	int ret, i;
2899
2900	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
2901	if (ret != 4) {
2902		pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
2903		return 1;
2904	}
2905
2906	p = acpiid;
2907	hid = strsep(&p, ":");
2908	uid = p;
2909
2910	if (!hid || !(*hid) || !uid) {
2911		pr_err("AMD-Vi: Invalid command line: hid or uid\n");
2912		return 1;
2913	}
2914
2915	i = early_acpihid_map_size++;
2916	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
2917	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
2918	early_acpihid_map[i].devid =
2919		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2920	early_acpihid_map[i].cmd_line	= true;
2921
2922	return 1;
2923}
2924
2925__setup("amd_iommu_dump",	parse_amd_iommu_dump);
2926__setup("amd_iommu=",		parse_amd_iommu_options);
2927__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
2928__setup("ivrs_ioapic",		parse_ivrs_ioapic);
2929__setup("ivrs_hpet",		parse_ivrs_hpet);
2930__setup("ivrs_acpihid",		parse_ivrs_acpihid);
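
/*
 * Example usage of the options above on the kernel command line (the
 * device addresses and ids are illustrative):
 *
 *   amd_iommu=fullflush
 *   amd_iommu_intr=vapic
 *   ivrs_ioapic[10]=00:14.0
 *   ivrs_hpet[0]=00:14.0
 *   ivrs_acpihid[00:14.5]=AMD0020:00
 */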
2931
2932IOMMU_INIT_FINISH(amd_iommu_detect,
2933		  gart_iommu_hole_init,
2934		  NULL,
2935		  NULL);
2936
2937bool amd_iommu_v2_supported(void)
2938{
2939	return amd_iommu_v2_present;
2940}
2941EXPORT_SYMBOL(amd_iommu_v2_supported);
2942
2943struct amd_iommu *get_amd_iommu(unsigned int idx)
2944{
2945	unsigned int i = 0;
2946	struct amd_iommu *iommu;
2947
2948	for_each_iommu(iommu)
2949		if (i++ == idx)
2950			return iommu;
2951	return NULL;
2952}
2953EXPORT_SYMBOL(get_amd_iommu);
2954
2955/****************************************************************************
2956 *
2957 * IOMMU EFR Performance Counter support. This code provides access to
2958 * the IOMMU Performance Counter (PC) registers.
2959 *
2960 ****************************************************************************/
2961
2962u8 amd_iommu_pc_get_max_banks(unsigned int idx)
2963{
2964	struct amd_iommu *iommu = get_amd_iommu(idx);
2965
2966	if (iommu)
2967		return iommu->max_banks;
2968
2969	return 0;
2970}
2971EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
2972
2973bool amd_iommu_pc_supported(void)
2974{
2975	return amd_iommu_pc_present;
2976}
2977EXPORT_SYMBOL(amd_iommu_pc_supported);
2978
2979u8 amd_iommu_pc_get_max_counters(unsigned int idx)
2980{
2981	struct amd_iommu *iommu = get_amd_iommu(idx);
2982
2983	if (iommu)
2984		return iommu->max_counters;
2985
2986	return 0;
2987}
2988EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
2989
2990static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
2991				u8 fxn, u64 *value, bool is_write)
2992{
2993	u32 offset;
2994	u32 max_offset_lim;
2995
2996	/* Make sure the IOMMU PC resource is available */
2997	if (!amd_iommu_pc_present)
2998		return -ENODEV;
2999
3000	/* Check for valid iommu and pc register indexing */
3001	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3002		return -ENODEV;
3003
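	/*
	 * Counter registers start at MMIO offset 0x40000; each bank spans
	 * 4K and each counter 256 bytes, so (0x40 | bank) << 12 folds the
	 * base offset into the bank index.
	 */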
3004	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
3005
3006	/* Limit the offset to the hw defined mmio region aperture */
3007	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3008				(iommu->max_counters << 8) | 0x28);
3009	if ((offset < MMIO_CNTR_REG_OFFSET) ||
3010	    (offset > max_offset_lim))
3011		return -EINVAL;
3012
3013	if (is_write) {
3014		u64 val = *value & GENMASK_ULL(47, 0);
3015
3016		writel((u32)val, iommu->mmio_base + offset);
3017		writel((val >> 32), iommu->mmio_base + offset + 4);
3018	} else {
3019		*value = readl(iommu->mmio_base + offset + 4);
3020		*value <<= 32;
3021		*value |= readl(iommu->mmio_base + offset);
3022		*value &= GENMASK_ULL(47, 0);
3023	}
3024
3025	return 0;
3026}
3027
3028int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3029{
3030	if (!iommu)
3031		return -EINVAL;
3032
3033	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3034}
3035EXPORT_SYMBOL(amd_iommu_pc_get_reg);
3036
3037int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3038{
3039	if (!iommu)
3040		return -EINVAL;
3041
3042	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3043}
3044EXPORT_SYMBOL(amd_iommu_pc_set_reg);
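
/*
 * Minimal usage sketch for the counter API above (illustrative values,
 * error handling trimmed):
 *
 *	u64 val;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0, &val))
 *		pr_info("bank 0, counter 0, reg 0: 0x%llx\n", val);
 */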