/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 cap_ptr;
        u64 mmio_phys;
        u16 pci_seg;
        u16 info;
        u32 reserved;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
        u8 type;
        u16 devid;
        u8 flags;
        u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
        u8 type;
        u8 flags;
        u16 length;
        u16 devid;
        u16 aux;
        u64 resv;
        u64 range_start;
        u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;

static int __initdata amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                           to handle */
LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                           we find in ACPI */
bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
                                           system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

/*
 * The ACPI table parsing functions set this variable on an error
 */
static int __initdata amd_iommu_init_err;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;      /* size of the device table */
static u32 alias_table_size;    /* size of the alias table */
static u32 rlookup_table_size;  /* size of the rlookup table */

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline void update_last_devid(u16 devid)
{
        if (devid > amd_iommu_last_bdf)
                amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
        unsigned shift = PAGE_SHIFT +
                get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

        return 1UL << shift;
}
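
/*
 * Example (assuming PAGE_SHIFT == 12): with amd_iommu_last_bdf == 0xffff and
 * a 32-byte device table entry, 0x10000 * 32 = 2MB of table space is needed;
 * get_order() yields 9, so tbl_size() returns 1UL << 21 = 2MB. Rounding up
 * to a power-of-two page order matches what __get_free_pages() will hand out
 * for the tables later.
 */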

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
        pci_read_config_dword(iommu->dev, 0xfc, &val);
        return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
        pci_write_config_dword(iommu->dev, 0xfc, val);
        pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
        u32 val;

        pci_write_config_dword(iommu->dev, 0xf0, address);
        pci_read_config_dword(iommu->dev, 0xf4, &val);
        return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
        pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
        pci_write_config_dword(iommu->dev, 0xf4, val);
}
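
/*
 * Note: config space offsets 0xf8/0xfc and 0xf0/0xf4 act as address/data
 * register pairs for the indirect L1 and L2 register spaces. A write to the
 * address register selects the target register (bit 31 for L1 and bit 8 for
 * L2 arm a write); the data register then transfers the value.
 */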

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
        u64 start = iommu->exclusion_start & PAGE_MASK;
        u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
        u64 entry;

        if (!iommu->exclusion_start)
                return;

        entry = start | MMIO_EXCL_ENABLE_MASK;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
                    &entry, sizeof(entry));

        entry = limit;
        memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
                    &entry, sizeof(entry));
}
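
/*
 * The base and limit MMIO registers above describe one address window per
 * IOMMU; bit 0 of the base register (MMIO_EXCL_ENABLE_MASK) enables it.
 * DMA addresses inside the window bypass translation entirely, which is why
 * only a single exclusion range per IOMMU can be programmed.
 */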

/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        entry = virt_to_phys(amd_iommu_dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                    &entry, sizeof(entry));
}
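
/*
 * The low bits of the device table base register encode the table size in
 * 4KB units minus one, hence the (dev_table_size >> 12) - 1 above; a 2MB
 * device table, for example, is encoded as 0x1ff.
 */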

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl |= (1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
        u32 ctrl;

        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
        ctrl &= ~(1 << bit);
        writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
        static const char * const feat_str[] = {
                "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
                "IA", "GA", "HE", "PC", NULL
        };
        int i;

        printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
               dev_name(&iommu->dev->dev), iommu->cap_ptr);

        if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
                printk(KERN_CONT " extended features: ");
                for (i = 0; feat_str[i]; ++i)
                        if (iommu_feature(iommu, (1ULL << i)))
                                printk(KERN_CONT " %s", feat_str[i]);
        }
        printk(KERN_CONT "\n");

        iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
        /* Disable command buffer */
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        /* Disable event logging and event interrupts */
        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

        /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
        u8 *ret;

        if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
                pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
                       address);
                pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
                return NULL;
        }

        ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
        if (ret != NULL)
                return ret;

        release_mem_region(address, MMIO_REGION_LENGTH);

        return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
        if (iommu->mmio_base)
                iounmap(iommu->mmio_base);
        release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
        return 0x04 << (*ivhd >> 6);
}

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
        u32 cap;

        cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
        update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

        return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        p += sizeof(*h);
        end += h->length;

        find_last_devid_on_pci(PCI_BUS(h->devid),
                               PCI_SLOT(h->devid),
                               PCI_FUNC(h->devid),
                               h->cap_ptr);

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        /*
         * Validate checksum here so we don't need to do it when
         * we actually parse the table
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0) {
                /* ACPI table corrupt */
                amd_iommu_init_err = -ENODEV;
                return 0;
        }

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        find_last_devid_from_ivhd(h);
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}
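
/*
 * ACPI requires that the byte-wise sum of an entire table, including the
 * checksum byte itself, is zero modulo 256. The loop above verifies this
 * before any field of the IVRS table is trusted.
 */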

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                             get_order(CMD_BUFFER_SIZE));

        if (cmd_buf == NULL)
                return NULL;

        iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

        return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = (u64)virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
        iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}
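
/*
 * MMIO_CMD_SIZE_512 programs the length field of the command buffer base
 * register; that field holds log2 of the number of 16-byte command slots,
 * so 512 slots correspond to the 8KB CMD_BUFFER_SIZE allocated above.
 */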

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf,
                   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                get_order(EVT_BUFFER_SIZE));

        if (iommu->evt_buf == NULL)
                return NULL;

        iommu->evt_buf_size = EVT_BUFFER_SIZE;

        return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 5) & 0x07;
        int _bit = bit & 0x1f;

        return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
}
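
/*
 * A device table entry is 256 bits wide and stored as eight 32-bit words in
 * data[]. The DEV_ENTRY_* constants are absolute bit positions within the
 * entry, so (bit >> 5) selects the word and (bit & 0x1f) the bit inside it;
 * bit 0x3e, for example, lands in data[1], bit 30.
 */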

void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}
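
/*
 * Workaround for IOMMU erratum 63: if the combined SysMgt field of a device
 * table entry reads 01b, the IW permission bit must be set as well for the
 * hardware to behave correctly.
 */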

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}
/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * We only can configure exclusion ranges per IOMMU, not
                 * per device. But we can enable the exclusion range per
                 * device. This is done here
                 */
                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}

/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc, low, high;
        int i, j;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
                              &range);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
                              &misc);

        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
                                         MMIO_GET_FD(range));
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);

        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;

        /* read extended feature bits */
        low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
        high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

        iommu->features = ((u64)high << 32) | low;

        if (!is_rd890_iommu(iommu->dev))
                return;

        /*
         * Some rd890 systems may not be fully reconfigured by the BIOS, so
         * it's necessary for us to store this information so it can be
         * reprogrammed on resume
         */

        pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
                              &iommu->stored_addr_lo);
        pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
                              &iommu->stored_addr_hi);

        /* Low bit locks writes to configuration space */
        iommu->stored_addr_lo &= ~1;

        for (i = 0; i < 6; i++)
                for (j = 0; j < 0x12; j++)
                        iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

        for (i = 0; i < 0x83; i++)
                iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                        struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 devid = 0, devid_start = 0, devid_to = 0;
        u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;

        /*
         * First save the recommended feature enable bits from ACPI
         */
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
         */
        p += sizeof(struct ivhd_header);
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
                                    " last device %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(iommu->first_device),
                                    PCI_SLOT(iommu->first_device),
                                    PCI_FUNC(iommu->first_device),
                                    PCI_BUS(iommu->last_device),
                                    PCI_SLOT(iommu->last_device),
                                    PCI_FUNC(iommu->last_device),
                                    e->flags);

                        for (dev_i = iommu->first_device;
                                        dev_i <= iommu->last_device; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk("  DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk("  DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
        u32 i;

        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        spin_lock_init(&iommu->lock);

        /* Add IOMMU to internal data structures */
        list_add_tail(&iommu->list, &amd_iommu_list);
        iommu->index = amd_iommus_present++;

        if (unlikely(iommu->index >= MAX_IOMMUS)) {
                WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
                return -ENOSYS;
        }

        /* Index is fine - add IOMMU to the array */
        amd_iommus[iommu->index] = iommu;

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
        if (!iommu->dev)
                return 1;

        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;
        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
        if (!iommu->mmio_base)
                return -ENOMEM;

        iommu->cmd_buf = alloc_command_buffer(iommu);
        if (!iommu->cmd_buf)
                return -ENOMEM;

        iommu->evt_buf = alloc_event_buffer(iommu);
        if (!iommu->evt_buf)
                return -ENOMEM;

        iommu->int_enabled = false;

        init_iommu_from_pci(iommu);
        init_iommu_from_acpi(iommu, h);
        init_iommu_devices(iommu);

        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;

        return pci_enable_device(iommu->dev);
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (*p) {
                case ACPI_IVHD_TYPE:

                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk("       mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL) {
                                amd_iommu_init_err = -ENOMEM;
                                return 0;
                        }

                        ret = init_iommu_one(iommu, h);
                        if (ret) {
                                amd_iommu_init_err = ret;
                                return 0;
                        }
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        if (pci_enable_msi(iommu->dev))
                return 1;

        r = request_threaded_irq(iommu->dev->irq,
                                 amd_iommu_int_handler,
                                 amd_iommu_int_thread,
                                 0, "AMD-Vi",
                                 iommu->dev);

        if (r) {
                pci_disable_msi(iommu->dev);
                return 1;
        }

        iommu->int_enabled = true;
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
        if (iommu->int_enabled)
                return 0;

        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
                return iommu_setup_msi(iommu);

        return 1;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}
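
/*
 * In the IVMD flags byte the IR/IW permission bits sit directly above the
 * unity-map enable bit, so the flags >> 1 above yields the protection bits
 * in the layout the mapping code expects.
 */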

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
        iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
        int i, j;
        u32 ioc_feature_control;
        struct pci_dev *pdev = NULL;

        /* RD890 BIOSes may not have completely reconfigured the iommu */
        if (!is_rd890_iommu(iommu->dev))
                return;

        /*
         * First, we need to ensure that the iommu is enabled. This is
         * controlled by a register in the northbridge
         */
        pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));

        if (!pdev)
                return;

        /* Select Northbridge indirect register 0x75 and enable writing */
        pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
        pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

        /* Enable the iommu */
        if (!(ioc_feature_control & 0x1))
                pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

        pci_dev_put(pdev);

        /* Restore the iommu BAR */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo);
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
                               iommu->stored_addr_hi);

        /* Restore the l1 indirect regs for each of the 6 l1s */
        for (i = 0; i < 6; i++)
                for (j = 0; j < 0x12; j++)
                        iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

        /* Restore the l2 indirect regs */
        for (i = 0; i < 0x83; i++)
                iommu_write_l2(iommu, i, iommu->stored_l2[i]);

        /* Lock PCI setup registers */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo | 1);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void enable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_disable(iommu);
                iommu_init_flags(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
                iommu_set_exclusion_range(iommu);
                iommu_init_msi(iommu);
                iommu_enable(iommu);
                iommu_flush_all_caches(iommu);
        }
}

static void disable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_apply_resume_quirks(iommu);

        /* re-load the hardware */
        enable_iommus();

        /*
         * we have to flush after the IOMMUs are enabled because a
         * disabled IOMMU will never execute the commands we send
         */
        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);
}

static int amd_iommu_suspend(void)
{
        /* disable IOMMUs to go out of the way for BIOS */
        disable_iommus();

        return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *      1 pass) Find the highest PCI device id the driver has to handle.
 *              Upon this information the size of the data structures is
 *              determined that needs to be allocated.
 *
 *      2 pass) Initialize the data structures just allocated with the
 *              information in the ACPI table about available AMD IOMMUs
 *              in the system. It also maps the PCI devices in the
 *              system to specific IOMMUs
 *
 *      3 pass) After the basic data structures are allocated and
 *              initialized we update them with information about memory
 *              remapping requirements parsed out of the ACPI table in
 *              this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
static int __init amd_iommu_init(void)
{
        int i, ret = 0;

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func
         * we need to handle. Upon this information the shared data
         * structures for the IOMMUs in the system will be allocated
         */
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;

        ret = amd_iommu_init_err;
        if (ret)
                goto out;

        dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

        ret = -ENOMEM;

        /* Device table - directly used by all IOMMUs */
        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                       get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
         * IOMMU see for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                                                         get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto free;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(
                        GFP_KERNEL | __GFP_ZERO,
                        get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto free;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                                            GFP_KERNEL | __GFP_ZERO,
                                            get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto free;

        /* init the device table */
        init_device_table();

        /*
         * let all alias entries point to themselves
         */
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        /*
         * never allocate domain 0 because it's used as the non-allocated and
         * error value placeholder
         */
        amd_iommu_pd_alloc_bitmap[0] = 1;

        spin_lock_init(&amd_iommu_pd_lock);

        /*
         * now the data structures are allocated and basically initialized
         * start the real acpi table scan
         */
        ret = -ENODEV;
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;

        if (amd_iommu_init_err) {
                ret = amd_iommu_init_err;
                goto free;
        }

        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;

        if (amd_iommu_init_err) {
                ret = amd_iommu_init_err;
                goto free;
        }

        ret = amd_iommu_init_devices();
        if (ret)
                goto free;

        enable_iommus();

        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
                ret = amd_iommu_init_dma_ops();

        if (ret)
                goto free_disable;

        amd_iommu_init_api();

        amd_iommu_init_notifier();

        register_syscore_ops(&amd_iommu_syscore_ops);

        if (iommu_pass_through)
                goto out;

        if (amd_iommu_unmap_flush)
                printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

        x86_platform.iommu_shutdown = disable_iommus;
out:
        return ret;

free_disable:
        disable_iommus();

free:
        amd_iommu_uninit_devices();

        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));

        free_pages((unsigned long)amd_iommu_rlookup_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_alias_table,
                   get_order(alias_table_size));

        free_pages((unsigned long)amd_iommu_dev_table,
                   get_order(dev_table_size));

        free_iommu_all();

        free_unity_maps();

#ifdef CONFIG_GART_IOMMU
        /*
         * We failed to initialize the AMD IOMMU - try fallback to GART
         * if possible.
         */
        gart_iommu_init();

#endif

        goto out;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
        return 0;
}

int __init amd_iommu_detect(void)
{
        if (no_iommu || (iommu_detected && !gart_iommu_aperture))
                return -ENODEV;

        if (amd_iommu_disabled)
                return -ENODEV;

        if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
                iommu_detected = 1;
                amd_iommu_detected = 1;
                x86_init.iommu.iommu_init = amd_iommu_init;

                /* Make sure ACS will be enabled */
                pci_request_acs();
                return 1;
        }
        return -ENODEV;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
        amd_iommu_dump = true;

        return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "fullflush", 9) == 0)
                        amd_iommu_unmap_flush = true;
                if (strncmp(str, "off", 3) == 0)
                        amd_iommu_disabled = true;
        }

        return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
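
/*
 * Kernel command line usage examples:
 *
 *   amd_iommu_dump        print the parsed IVRS ACPI table contents
 *   amd_iommu=fullflush   flush the IO/TLB on every unmap operation
 *   amd_iommu=off         disable the AMD IOMMU driver completely
 */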

IOMMU_INIT_FINISH(amd_iommu_detect,
                  gart_iommu_hole_init,
                  0,
                  0);
1/*
2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/pci.h>
21#include <linux/acpi.h>
22#include <linux/list.h>
23#include <linux/slab.h>
24#include <linux/syscore_ops.h>
25#include <linux/interrupt.h>
26#include <linux/msi.h>
27#include <linux/amd-iommu.h>
28#include <linux/export.h>
29#include <asm/pci-direct.h>
30#include <asm/iommu.h>
31#include <asm/gart.h>
32#include <asm/x86_init.h>
33#include <asm/iommu_table.h>
34
35#include "amd_iommu_proto.h"
36#include "amd_iommu_types.h"
37
38/*
39 * definitions for the ACPI scanning code
40 */
41#define IVRS_HEADER_LENGTH 48
42
43#define ACPI_IVHD_TYPE 0x10
44#define ACPI_IVMD_TYPE_ALL 0x20
45#define ACPI_IVMD_TYPE 0x21
46#define ACPI_IVMD_TYPE_RANGE 0x22
47
48#define IVHD_DEV_ALL 0x01
49#define IVHD_DEV_SELECT 0x02
50#define IVHD_DEV_SELECT_RANGE_START 0x03
51#define IVHD_DEV_RANGE_END 0x04
52#define IVHD_DEV_ALIAS 0x42
53#define IVHD_DEV_ALIAS_RANGE 0x43
54#define IVHD_DEV_EXT_SELECT 0x46
55#define IVHD_DEV_EXT_SELECT_RANGE 0x47
56
57#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
58#define IVHD_FLAG_PASSPW_EN_MASK 0x02
59#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
60#define IVHD_FLAG_ISOC_EN_MASK 0x08
61
62#define IVMD_FLAG_EXCL_RANGE 0x08
63#define IVMD_FLAG_UNITY_MAP 0x01
64
65#define ACPI_DEVFLAG_INITPASS 0x01
66#define ACPI_DEVFLAG_EXTINT 0x02
67#define ACPI_DEVFLAG_NMI 0x04
68#define ACPI_DEVFLAG_SYSMGT1 0x10
69#define ACPI_DEVFLAG_SYSMGT2 0x20
70#define ACPI_DEVFLAG_LINT0 0x40
71#define ACPI_DEVFLAG_LINT1 0x80
72#define ACPI_DEVFLAG_ATSDIS 0x10000000
73
74/*
75 * ACPI table definitions
76 *
77 * These data structures are laid over the table to parse the important values
78 * out of it.
79 */
80
81/*
82 * structure describing one IOMMU in the ACPI table. Typically followed by one
83 * or more ivhd_entrys.
84 */
85struct ivhd_header {
86 u8 type;
87 u8 flags;
88 u16 length;
89 u16 devid;
90 u16 cap_ptr;
91 u64 mmio_phys;
92 u16 pci_seg;
93 u16 info;
94 u32 reserved;
95} __attribute__((packed));
96
97/*
98 * A device entry describing which devices a specific IOMMU translates and
99 * which requestor ids they use.
100 */
101struct ivhd_entry {
102 u8 type;
103 u16 devid;
104 u8 flags;
105 u32 ext;
106} __attribute__((packed));
107
108/*
109 * An AMD IOMMU memory definition structure. It defines things like exclusion
110 * ranges for devices and regions that should be unity mapped.
111 */
112struct ivmd_header {
113 u8 type;
114 u8 flags;
115 u16 length;
116 u16 devid;
117 u16 aux;
118 u64 resv;
119 u64 range_start;
120 u64 range_length;
121} __attribute__((packed));
122
123bool amd_iommu_dump;
124
125static int __initdata amd_iommu_detected;
126static bool __initdata amd_iommu_disabled;
127
128u16 amd_iommu_last_bdf; /* largest PCI device id we have
129 to handle */
130LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
131 we find in ACPI */
132u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */
133
134LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
135 system */
136
137/* Array to assign indices to IOMMUs*/
138struct amd_iommu *amd_iommus[MAX_IOMMUS];
139int amd_iommus_present;
140
141/* IOMMUs have a non-present cache? */
142bool amd_iommu_np_cache __read_mostly;
143bool amd_iommu_iotlb_sup __read_mostly = true;
144
145u32 amd_iommu_max_pasids __read_mostly = ~0;
146
147bool amd_iommu_v2_present __read_mostly;
148
149bool amd_iommu_force_isolation __read_mostly;
150
151/*
152 * The ACPI table parsing functions set this variable on an error
153 */
154static int __initdata amd_iommu_init_err;
155
156/*
157 * List of protection domains - used during resume
158 */
159LIST_HEAD(amd_iommu_pd_list);
160spinlock_t amd_iommu_pd_lock;
161
162/*
163 * Pointer to the device table which is shared by all AMD IOMMUs
164 * it is indexed by the PCI device id or the HT unit id and contains
165 * information about the domain the device belongs to as well as the
166 * page table root pointer.
167 */
168struct dev_table_entry *amd_iommu_dev_table;
169
170/*
171 * The alias table is a driver specific data structure which contains the
172 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
173 * More than one device can share the same requestor id.
174 */
175u16 *amd_iommu_alias_table;
176
177/*
178 * The rlookup table is used to find the IOMMU which is responsible
179 * for a specific device. It is also indexed by the PCI device id.
180 */
181struct amd_iommu **amd_iommu_rlookup_table;
182
183/*
184 * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
185 * to know which ones are already in use.
186 */
187unsigned long *amd_iommu_pd_alloc_bitmap;
188
189static u32 dev_table_size; /* size of the device table */
190static u32 alias_table_size; /* size of the alias table */
191static u32 rlookup_table_size; /* size if the rlookup table */
192
193/*
194 * This function flushes all internal caches of
195 * the IOMMU used by this driver.
196 */
197extern void iommu_flush_all_caches(struct amd_iommu *iommu);
198
199static int amd_iommu_enable_interrupts(void);
200
201static inline void update_last_devid(u16 devid)
202{
203 if (devid > amd_iommu_last_bdf)
204 amd_iommu_last_bdf = devid;
205}
206
207static inline unsigned long tbl_size(int entry_size)
208{
209 unsigned shift = PAGE_SHIFT +
210 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
211
212 return 1UL << shift;
213}
214
215/* Access to l1 and l2 indexed register spaces */
216
217static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
218{
219 u32 val;
220
221 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
222 pci_read_config_dword(iommu->dev, 0xfc, &val);
223 return val;
224}
225
226static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
227{
228 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
229 pci_write_config_dword(iommu->dev, 0xfc, val);
230 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
231}
232
233static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
234{
235 u32 val;
236
237 pci_write_config_dword(iommu->dev, 0xf0, address);
238 pci_read_config_dword(iommu->dev, 0xf4, &val);
239 return val;
240}
241
242static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
243{
244 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
245 pci_write_config_dword(iommu->dev, 0xf4, val);
246}
247
248/****************************************************************************
249 *
250 * AMD IOMMU MMIO register space handling functions
251 *
252 * These functions are used to program the IOMMU device registers in
253 * MMIO space required for that driver.
254 *
255 ****************************************************************************/
256
257/*
258 * This function set the exclusion range in the IOMMU. DMA accesses to the
259 * exclusion range are passed through untranslated
260 */
261static void iommu_set_exclusion_range(struct amd_iommu *iommu)
262{
263 u64 start = iommu->exclusion_start & PAGE_MASK;
264 u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
265 u64 entry;
266
267 if (!iommu->exclusion_start)
268 return;
269
270 entry = start | MMIO_EXCL_ENABLE_MASK;
271 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
272 &entry, sizeof(entry));
273
274 entry = limit;
275 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
276 &entry, sizeof(entry));
277}
278
279/* Programs the physical address of the device table into the IOMMU hardware */
280static void iommu_set_device_table(struct amd_iommu *iommu)
281{
282 u64 entry;
283
284 BUG_ON(iommu->mmio_base == NULL);
285
286 entry = virt_to_phys(amd_iommu_dev_table);
287 entry |= (dev_table_size >> 12) - 1;
288 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
289 &entry, sizeof(entry));
290}
291
292/* Generic functions to enable/disable certain features of the IOMMU. */
293static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
294{
295 u32 ctrl;
296
297 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
298 ctrl |= (1 << bit);
299 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
300}
301
302static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
303{
304 u32 ctrl;
305
306 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
307 ctrl &= ~(1 << bit);
308 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
309}
310
311static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
312{
313 u32 ctrl;
314
315 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
316 ctrl &= ~CTRL_INV_TO_MASK;
317 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
318 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
319}
320
321/* Function to enable the hardware */
322static void iommu_enable(struct amd_iommu *iommu)
323{
324 static const char * const feat_str[] = {
325 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
326 "IA", "GA", "HE", "PC", NULL
327 };
328 int i;
329
330 printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
331 dev_name(&iommu->dev->dev), iommu->cap_ptr);
332
333 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
334 printk(KERN_CONT " extended features: ");
335 for (i = 0; feat_str[i]; ++i)
336 if (iommu_feature(iommu, (1ULL << i)))
337 printk(KERN_CONT " %s", feat_str[i]);
338 }
339 printk(KERN_CONT "\n");
340
341 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
342}
343
344static void iommu_disable(struct amd_iommu *iommu)
345{
346 /* Disable command buffer */
347 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
348
349 /* Disable event logging and event interrupts */
350 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
351 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
352
353 /* Disable IOMMU hardware itself */
354 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
355}
356
357/*
358 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
359 * the system has one.
360 */
361static u8 * __init iommu_map_mmio_space(u64 address)
362{
363 if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
364 pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
365 address);
366 pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
367 return NULL;
368 }
369
370 return ioremap_nocache(address, MMIO_REGION_LENGTH);
371}
372
373static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
374{
375 if (iommu->mmio_base)
376 iounmap(iommu->mmio_base);
377 release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
378}
379
380/****************************************************************************
381 *
382 * The functions below belong to the first pass of AMD IOMMU ACPI table
383 * parsing. In this pass we try to find out the highest device id this
384 * code has to handle. Upon this information the size of the shared data
385 * structures is determined later.
386 *
387 ****************************************************************************/
388
389/*
390 * This function calculates the length of a given IVHD entry
391 */
392static inline int ivhd_entry_length(u8 *ivhd)
393{
394 return 0x04 << (*ivhd >> 6);
395}
396
397/*
398 * This function reads the last device id the IOMMU has to handle from the PCI
399 * capability header for this IOMMU
400 */
401static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
402{
403 u32 cap;
404
405 cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
406 update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
407
408 return 0;
409}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
        u8 *p = (void *)h, *end = (void *)h;
        struct ivhd_entry *dev;

        p += sizeof(*h);
        end += h->length;

        find_last_devid_on_pci(PCI_BUS(h->devid),
                               PCI_SLOT(h->devid),
                               PCI_FUNC(h->devid),
                               h->cap_ptr);

        while (p < end) {
                dev = (struct ivhd_entry *)p;
                switch (dev->type) {
                case IVHD_DEV_SELECT:
                case IVHD_DEV_RANGE_END:
                case IVHD_DEV_ALIAS:
                case IVHD_DEV_EXT_SELECT:
                        /* all the above subfield types refer to device ids */
                        update_last_devid(dev->devid);
                        break;
                default:
                        break;
                }
                p += ivhd_entry_length(p);
        }

        WARN_ON(p != end);

        return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. Since this is the first of three passes over
 * the ACPI table, we also validate the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
        int i;
        u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;

        /*
         * Validate checksum here so we don't need to do it when
         * we actually parse the table
         */
        for (i = 0; i < table->length; ++i)
                checksum += p[i];
        if (checksum != 0) {
                /* ACPI table corrupt */
                amd_iommu_init_err = -ENODEV;
                return 0;
        }

        p += IVRS_HEADER_LENGTH;

        end += table->length;
        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (h->type) {
                case ACPI_IVHD_TYPE:
                        find_last_devid_from_ivhd(h);
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup tables and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                        get_order(CMD_BUFFER_SIZE));

        if (cmd_buf == NULL)
                return NULL;

        iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

        return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

        writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
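
/*
 * Note: the command buffer is a ring buffer; the head register is the
 * hardware's fetch pointer and the tail register is where the driver
 * queues new commands. Zeroing both while CMDBUF_EN is disabled leaves
 * an empty ring, so the IOMMU restarts fetching from a clean state.
 */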

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->cmd_buf == NULL);

        entry = (u64)virt_to_phys(iommu->cmd_buf);
        entry |= MMIO_CMD_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                    &entry, sizeof(entry));

        amd_iommu_reset_cmd_buffer(iommu);
        iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}
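
/*
 * Layout sketch (inferred from the macro names above, to be checked
 * against the IOMMU spec): the 64-bit command buffer base register holds
 * the physical base address with the ring size encoded in its upper
 * bits - MMIO_CMD_SIZE_512 selects a 512-entry ring, matching
 * CMD_BUFFER_SIZE.
 */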

static void __init free_command_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->cmd_buf,
                   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
        iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                get_order(EVT_BUFFER_SIZE));

        if (iommu->evt_buf == NULL)
                return NULL;

        iommu->evt_buf_size = EVT_BUFFER_SIZE;

        return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
        u64 entry;

        BUG_ON(iommu->evt_buf == NULL);

        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}
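
/*
 * The event log base register follows the same pattern as the command
 * buffer base above: physical address in the low bits, buffer length
 * encoded in the upper bits (EVT_LEN_MASK).
 */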

static void __init free_event_buffer(struct amd_iommu *iommu)
{
        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
{
        iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                get_order(PPR_LOG_SIZE));

        if (iommu->ppr_log == NULL)
                return NULL;

        return iommu->ppr_log;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
        u64 entry;

        if (iommu->ppr_log == NULL)
                return;

        entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

        memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
                    &entry, sizeof(entry));

        /* set head and tail to zero manually */
        writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
        iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
        if (iommu->ppr_log == NULL)
                return;

        free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
        if (!iommu_feature(iommu, FEATURE_GT))
                return;

        iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
        int i = (bit >> 6) & 0x03;
        int _bit = bit & 0x3f;

        return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
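
/*
 * Worked example for the index math above: a device table entry spans
 * four 64-bit words (hence the 0x03 mask), so bit numbers 0-255 are
 * valid. For bit 98, i = (98 >> 6) & 0x03 == 1 selects data[1], and
 * _bit = 98 & 0x3f == 34 is the position inside that word.
 */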
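/*
 * Workaround for IOMMU erratum 63 (as understood from the erratum
 * documentation): a device table entry whose combined SYSMGT field
 * reads 01b needs the IW (write permission) bit set as well.
 */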
void amd_iommu_apply_erratum_63(u16 devid)
{
        int sysmgt;

        sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
                 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

        if (sysmgt == 0x01)
                set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
        amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device-specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
                                           u16 devid, u32 flags, u32 ext_flags)
{
        if (flags & ACPI_DEVFLAG_INITPASS)
                set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
        if (flags & ACPI_DEVFLAG_EXTINT)
                set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
        if (flags & ACPI_DEVFLAG_NMI)
                set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
        if (flags & ACPI_DEVFLAG_SYSMGT1)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
        if (flags & ACPI_DEVFLAG_SYSMGT2)
                set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
        if (flags & ACPI_DEVFLAG_LINT0)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
        if (flags & ACPI_DEVFLAG_LINT1)
                set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

        amd_iommu_apply_erratum_63(devid);

        set_iommu_for_device(iommu, devid);
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
        struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

        if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
                return;

        if (iommu) {
                /*
                 * Exclusion ranges can only be configured per IOMMU, not
                 * per device. But the exclusion range can be enabled per
                 * device. This is done here.
                 */
                set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
                iommu->exclusion_start = m->range_start;
                iommu->exclusion_length = m->range_length;
        }
}

/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries.
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
        int cap_ptr = iommu->cap_ptr;
        u32 range, misc, low, high;
        int i, j;

        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
                              &iommu->cap);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
                              &range);
        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
                              &misc);

        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
                                         MMIO_GET_FD(range));
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);

        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;

        /* read extended feature bits */
        low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
        high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

        iommu->features = ((u64)high << 32) | low;

        if (iommu_feature(iommu, FEATURE_GT)) {
                int glxval;
                u32 pasids;
                u64 shift;

                shift = iommu->features & FEATURE_PASID_MASK;
                shift >>= FEATURE_PASID_SHIFT;
                pasids = (1 << shift);

                amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
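                /*
                 * Illustration: the limit is clamped system-wide, so one
                 * IOMMU supporting 1 << 16 PASIDs next to one supporting
                 * 1 << 20 leaves 1 << 16 usable PASIDs for everybody.
                 */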

                glxval = iommu->features & FEATURE_GLXVAL_MASK;
                glxval >>= FEATURE_GLXVAL_SHIFT;

                if (amd_iommu_max_glx_val == -1)
                        amd_iommu_max_glx_val = glxval;
                else
                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
        }

        if (iommu_feature(iommu, FEATURE_GT) &&
            iommu_feature(iommu, FEATURE_PPR)) {
                iommu->is_iommu_v2 = true;
                amd_iommu_v2_present = true;
        }

        if (!is_rd890_iommu(iommu->dev))
                return;

        /*
         * Some rd890 systems may not be fully reconfigured by the BIOS, so
         * it's necessary for us to store this information so it can be
         * reprogrammed on resume
         */

        pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
                              &iommu->stored_addr_lo);
        pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
                              &iommu->stored_addr_hi);

        /* Low bit locks writes to configuration space */
        iommu->stored_addr_lo &= ~1;

        for (i = 0; i < 6; i++)
                for (j = 0; j < 0x12; j++)
                        iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

        for (i = 0; i < 0x83; i++)
                iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                        struct ivhd_header *h)
{
        u8 *p = (u8 *)h;
        u8 *end = p, flags = 0;
        u16 devid = 0, devid_start = 0, devid_to = 0;
        u32 dev_i, ext_flags = 0;
        bool alias = false;
        struct ivhd_entry *e;

        /*
         * First save the recommended feature enable bits from ACPI
         */
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
         */
        p += sizeof(struct ivhd_header);
        end += h->length;

        while (p < end) {
                e = (struct ivhd_entry *)p;
                switch (e->type) {
                case IVHD_DEV_ALL:

                        DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
                                    " last device %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(iommu->first_device),
                                    PCI_SLOT(iommu->first_device),
                                    PCI_FUNC(iommu->first_device),
                                    PCI_BUS(iommu->last_device),
                                    PCI_SLOT(iommu->last_device),
                                    PCI_FUNC(iommu->last_device),
                                    e->flags);

                        for (dev_i = iommu->first_device;
                                        dev_i <= iommu->last_device; ++dev_i)
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        e->flags, 0);
                        break;
                case IVHD_DEV_SELECT:

                        DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                        break;
                case IVHD_DEV_SELECT_RANGE_START:

                        DUMP_printk(" DEV_SELECT_RANGE_START\t "
                                    "devid: %02x:%02x.%x flags: %02x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = 0;
                        alias = false;
                        break;
                case IVHD_DEV_ALIAS:

                        DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
                                    "flags: %02x devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid = e->devid;
                        devid_to = e->ext >> 8;
                        set_dev_entry_from_acpi(iommu, devid,    e->flags, 0);
                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                        amd_iommu_alias_table[devid] = devid_to;
                        break;
                case IVHD_DEV_ALIAS_RANGE:

                        DUMP_printk(" DEV_ALIAS_RANGE\t\t "
                                    "devid: %02x:%02x.%x flags: %02x "
                                    "devid_to: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags,
                                    PCI_BUS(e->ext >> 8),
                                    PCI_SLOT(e->ext >> 8),
                                    PCI_FUNC(e->ext >> 8));

                        devid_start = e->devid;
                        flags = e->flags;
                        devid_to = e->ext >> 8;
                        ext_flags = 0;
                        alias = true;
                        break;
                case IVHD_DEV_EXT_SELECT:

                        DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
                                    "flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid = e->devid;
                        set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                e->ext);
                        break;
                case IVHD_DEV_EXT_SELECT_RANGE:

                        DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid),
                                    e->flags, e->ext);

                        devid_start = e->devid;
                        flags = e->flags;
                        ext_flags = e->ext;
                        alias = false;
                        break;
                case IVHD_DEV_RANGE_END:

                        DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
                                    PCI_BUS(e->devid),
                                    PCI_SLOT(e->devid),
                                    PCI_FUNC(e->devid));

                        devid = e->devid;
                        for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                if (alias) {
                                        amd_iommu_alias_table[dev_i] = devid_to;
                                        set_dev_entry_from_acpi(iommu,
                                                devid_to, flags, ext_flags);
                                }
                                set_dev_entry_from_acpi(iommu, dev_i,
                                                        flags, ext_flags);
                        }
                        break;
                default:
                        break;
                }

                p += ivhd_entry_length(p);
        }
}
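
/*
 * Example of the range bookkeeping above: a DEV_SELECT_RANGE_START entry
 * for devid 0x0010 followed by a DEV_RANGE_END entry for devid 0x001f
 * applies the saved flags to all sixteen requestor ids in between
 * (inclusive).
 */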

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
        u32 i;

        for (i = iommu->first_device; i <= iommu->last_device; ++i)
                set_iommu_for_device(iommu, i);

        return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
        free_command_buffer(iommu);
        free_event_buffer(iommu);
        free_ppr_log(iommu);
        iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
        struct amd_iommu *iommu, *next;

        for_each_iommu_safe(iommu, next) {
                list_del(&iommu->list);
                free_iommu_one(iommu);
                kfree(iommu);
        }
}

/*
 * This function glues the initialization functions for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
        spin_lock_init(&iommu->lock);

        /* Add IOMMU to internal data structures */
        list_add_tail(&iommu->list, &amd_iommu_list);
        iommu->index = amd_iommus_present++;

        if (unlikely(iommu->index >= MAX_IOMMUS)) {
                WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
                return -ENOSYS;
        }

        /* Index is fine - add IOMMU to the array */
        amd_iommus[iommu->index] = iommu;

        /*
         * Copy data from ACPI table entry to the iommu struct
         */
        iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
        if (!iommu->dev)
                return 1;

        iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
                                                PCI_DEVFN(0, 0));

        iommu->cap_ptr = h->cap_ptr;
        iommu->pci_seg = h->pci_seg;
        iommu->mmio_phys = h->mmio_phys;
        iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
        if (!iommu->mmio_base)
                return -ENOMEM;

        iommu->cmd_buf = alloc_command_buffer(iommu);
        if (!iommu->cmd_buf)
                return -ENOMEM;

        iommu->evt_buf = alloc_event_buffer(iommu);
        if (!iommu->evt_buf)
                return -ENOMEM;

        iommu->int_enabled = false;

        init_iommu_from_pci(iommu);
        init_iommu_from_acpi(iommu, h);
        init_iommu_devices(iommu);

        if (iommu_feature(iommu, FEATURE_PPR)) {
                iommu->ppr_log = alloc_ppr_log(iommu);
                if (!iommu->ppr_log)
                        return -ENOMEM;
        }

        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
                amd_iommu_np_cache = true;

        return pci_enable_device(iommu->dev);
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivhd_header *h;
        struct amd_iommu *iommu;
        int ret;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                h = (struct ivhd_header *)p;
                switch (*p) {
                case ACPI_IVHD_TYPE:

                        DUMP_printk("device: %02x:%02x.%01x cap: %04x "
                                    "seg: %d flags: %01x info %04x\n",
                                    PCI_BUS(h->devid), PCI_SLOT(h->devid),
                                    PCI_FUNC(h->devid), h->cap_ptr,
                                    h->pci_seg, h->flags, h->info);
                        DUMP_printk(" mmio-addr: %016llx\n",
                                    h->mmio_phys);

                        iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                        if (iommu == NULL) {
                                amd_iommu_init_err = -ENOMEM;
                                return 0;
                        }

                        ret = init_iommu_one(iommu, h);
                        if (ret) {
                                amd_iommu_init_err = ret;
                                return 0;
                        }
                        break;
                default:
                        break;
                }
                p += h->length;
        }
        WARN_ON(p != end);

        return 0;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
        int r;

        r = pci_enable_msi(iommu->dev);
        if (r)
                return r;

        r = request_threaded_irq(iommu->dev->irq,
                                 amd_iommu_int_handler,
                                 amd_iommu_int_thread,
                                 0, "AMD-Vi",
                                 iommu->dev);

        if (r) {
                pci_disable_msi(iommu->dev);
                return r;
        }

        iommu->int_enabled = true;

        return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
        int ret;

        if (iommu->int_enabled)
                goto enable_faults;

        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
                ret = iommu_setup_msi(iommu);
        else
                ret = -ENODEV;

        if (ret)
                return ret;

enable_faults:
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

        if (iommu->ppr_log != NULL)
                iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

        return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
        struct unity_map_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
        int i;

        switch (m->type) {
        case ACPI_IVMD_TYPE:
                set_device_exclusion_range(m->devid, m);
                break;
        case ACPI_IVMD_TYPE_ALL:
                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                        set_device_exclusion_range(i, m);
                break;
        case ACPI_IVMD_TYPE_RANGE:
                for (i = m->devid; i <= m->aux; ++i)
                        set_device_exclusion_range(i, m);
                break;
        default:
                break;
        }

        return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
        struct unity_map_entry *e = NULL;
        char *s;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        switch (m->type) {
        default:
                kfree(e);
                return 0;
        case ACPI_IVMD_TYPE:
                s = "IVMD_TYPE\t\t\t";
                e->devid_start = e->devid_end = m->devid;
                break;
        case ACPI_IVMD_TYPE_ALL:
                s = "IVMD_TYPE_ALL\t\t";
                e->devid_start = 0;
                e->devid_end = amd_iommu_last_bdf;
                break;
        case ACPI_IVMD_TYPE_RANGE:
                s = "IVMD_TYPE_RANGE\t\t";
                e->devid_start = m->devid;
                e->devid_end = m->aux;
                break;
        }
        e->address_start = PAGE_ALIGN(m->range_start);
        e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
        e->prot = m->flags >> 1;

        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
                    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
                    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
                    e->address_start, e->address_end, m->flags);

        list_add_tail(&e->list, &amd_iommu_unity_map);

        return 0;
}
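
/*
 * Example: an ACPI_IVMD_TYPE entry with range_start 0x100000 and
 * range_length 0x20000 yields a unity mapping of [0x100000, 0x120000)
 * for the single device in m->devid; PAGE_ALIGN() rounds both values up
 * to a page boundary first (a no-op here since both are already aligned).
 */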

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
        u8 *p = (u8 *)table, *end = (u8 *)table;
        struct ivmd_header *m;

        end += table->length;
        p += IVRS_HEADER_LENGTH;

        while (p < end) {
                m = (struct ivmd_header *)p;
                if (m->flags & IVMD_FLAG_EXCL_RANGE)
                        init_exclusion_range(m);
                else if (m->flags & IVMD_FLAG_UNITY_MAP)
                        init_unity_map_range(m);

                p += m->length;
        }

        return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
        u32 devid;

        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
        }
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
        iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

        /* Set IOTLB invalidation timeout to 1s */
        iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
        int i, j;
        u32 ioc_feature_control;
        struct pci_dev *pdev = iommu->root_pdev;

        /* RD890 BIOSes may not have completely reconfigured the iommu */
        if (!is_rd890_iommu(iommu->dev) || !pdev)
                return;

        /*
         * First, we need to ensure that the iommu is enabled. This is
         * controlled by a register in the northbridge
         */

        /* Select Northbridge indirect register 0x75 and enable writing */
        pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
        pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

        /* Enable the iommu */
        if (!(ioc_feature_control & 0x1))
                pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

        /* Restore the iommu BAR */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo);
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
                               iommu->stored_addr_hi);

        /* Restore the l1 indirect regs for each of the 6 l1s */
        for (i = 0; i < 6; i++)
                for (j = 0; j < 0x12; j++)
                        iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

        /* Restore the l2 indirect regs */
        for (i = 0; i < 0x83; i++)
                iommu_write_l2(iommu, i, iommu->stored_l2[i]);

        /* Lock PCI setup registers */
        pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
                               iommu->stored_addr_lo | 1);
}
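
/*
 * The index/data pair used above is the usual northbridge indirect
 * access scheme: config offset 0x60 selects the indirect register
 * (bit 7 enables writes) and config offset 0x64 transfers its value.
 */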

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void enable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_disable(iommu);
                iommu_init_flags(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
                iommu_enable_ppr_log(iommu);
                iommu_enable_gt(iommu);
                iommu_set_exclusion_range(iommu);
                iommu_enable(iommu);
                iommu_flush_all_caches(iommu);
        }
}

static void disable_iommus(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 */

static void amd_iommu_resume(void)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu)
                iommu_apply_resume_quirks(iommu);

        /* re-load the hardware */
        enable_iommus();

        amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
        /* disable IOMMUs to go out of the way for BIOS */
        disable_iommus();

        return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
        .suspend = amd_iommu_suspend,
        .resume = amd_iommu_resume,
};

static void __init free_on_init_error(void)
{
        amd_iommu_uninit_devices();

        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
                   get_order(MAX_DOMAIN_ID/8));

        free_pages((unsigned long)amd_iommu_rlookup_table,
                   get_order(rlookup_table_size));

        free_pages((unsigned long)amd_iommu_alias_table,
                   get_order(alias_table_size));

        free_pages((unsigned long)amd_iommu_dev_table,
                   get_order(dev_table_size));

        free_iommu_all();

        free_unity_maps();

#ifdef CONFIG_GART_IOMMU
        /*
         * We failed to initialize the AMD IOMMU - try fallback to GART
         * if possible.
         */
        gart_iommu_init();

#endif
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 * 1 pass) Find the highest PCI device id the driver has to handle.
 *         Based on this information the sizes of the data structures
 *         that need to be allocated are determined.
 *
 * 2 pass) Initialize the data structures just allocated with the
 *         information in the ACPI table about available AMD IOMMUs
 *         in the system. It also maps the PCI devices in the
 *         system to specific IOMMUs
 *
 * 3 pass) After the basic data structures are allocated and
 *         initialized we update them with information about memory
 *         remapping requirements parsed out of the ACPI table in
 *         this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
int __init amd_iommu_init_hardware(void)
{
        int i, ret = 0;

        if (!amd_iommu_detected)
                return -ENODEV;

        if (amd_iommu_dev_table != NULL) {
                /* Hardware already initialized */
                return 0;
        }

        /*
         * First parse ACPI tables to find the largest Bus/Dev/Func
         * we need to handle. Based on this information the shared data
         * structures for the IOMMUs in the system will be allocated
         */
        if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                return -ENODEV;

        ret = amd_iommu_init_err;
        if (ret)
                goto out;

        dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
        alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

        /* Device table - directly used by all IOMMUs */
        ret = -ENOMEM;
        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                      get_order(dev_table_size));
        if (amd_iommu_dev_table == NULL)
                goto out;

        /*
         * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
         * IOMMU sees for that device
         */
        amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
                        get_order(alias_table_size));
        if (amd_iommu_alias_table == NULL)
                goto free;

        /* IOMMU rlookup table - find the IOMMU for a specific device */
        amd_iommu_rlookup_table = (void *)__get_free_pages(
                        GFP_KERNEL | __GFP_ZERO,
                        get_order(rlookup_table_size));
        if (amd_iommu_rlookup_table == NULL)
                goto free;

        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
                                            GFP_KERNEL | __GFP_ZERO,
                                            get_order(MAX_DOMAIN_ID/8));
        if (amd_iommu_pd_alloc_bitmap == NULL)
                goto free;

        /* init the device table */
        init_device_table();

        /*
         * let all alias entries point to themselves
         */
        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                amd_iommu_alias_table[i] = i;

        /*
         * never allocate domain 0 because it's used as the non-allocated and
         * error value placeholder
         */
        amd_iommu_pd_alloc_bitmap[0] = 1;

        spin_lock_init(&amd_iommu_pd_lock);

        /*
         * now the data structures are allocated and basically initialized
         * start the real acpi table scan
         */
        ret = -ENODEV;
        if (acpi_table_parse("IVRS", init_iommu_all) != 0)
                goto free;

        if (amd_iommu_init_err) {
                ret = amd_iommu_init_err;
                goto free;
        }

        if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                goto free;

        if (amd_iommu_init_err) {
                ret = amd_iommu_init_err;
                goto free;
        }

        ret = amd_iommu_init_devices();
        if (ret)
                goto free;

        enable_iommus();

        amd_iommu_init_notifier();

        register_syscore_ops(&amd_iommu_syscore_ops);

out:
        return ret;

free:
        free_on_init_error();

        return ret;
}

static int amd_iommu_enable_interrupts(void)
{
        struct amd_iommu *iommu;
        int ret = 0;

        for_each_iommu(iommu) {
                ret = iommu_init_msi(iommu);
                if (ret)
                        goto out;
        }

out:
        return ret;
}

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * The function calls amd_iommu_init_hardware() to set up and enable the
 * IOMMU hardware if this has not happened yet. After that the driver
 * registers for the DMA-API and for the IOMMU-API as necessary.
 */
static int __init amd_iommu_init(void)
{
        int ret = 0;

        ret = amd_iommu_init_hardware();
        if (ret)
                goto out;

        ret = amd_iommu_enable_interrupts();
        if (ret)
                goto free;

        if (iommu_pass_through)
                ret = amd_iommu_init_passthrough();
        else
                ret = amd_iommu_init_dma_ops();

        if (ret)
                goto free;

        amd_iommu_init_api();

        x86_platform.iommu_shutdown = disable_iommus;

        if (iommu_pass_through)
                goto out;

        if (amd_iommu_unmap_flush)
                printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");

out:
        return ret;

free:
        disable_iommus();

        free_on_init_error();

        goto out;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It simply checks whether an IVRS ACPI table exists to detect
 * AMD IOMMUs in the system.
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
        return 0;
}

int __init amd_iommu_detect(void)
{
        if (no_iommu || (iommu_detected && !gart_iommu_aperture))
                return -ENODEV;

        if (amd_iommu_disabled)
                return -ENODEV;

        if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
                iommu_detected = 1;
                amd_iommu_detected = 1;
                x86_init.iommu.iommu_init = amd_iommu_init;

                /* Make sure ACS will be enabled */
                pci_request_acs();
                return 1;
        }
        return -ENODEV;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
        amd_iommu_dump = true;

        return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "fullflush", 9) == 0)
                        amd_iommu_unmap_flush = true;
                if (strncmp(str, "off", 3) == 0)
                        amd_iommu_disabled = true;
                if (strncmp(str, "force_isolation", 15) == 0)
                        amd_iommu_force_isolation = true;
        }

        return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);

IOMMU_INIT_FINISH(amd_iommu_detect,
                  gart_iommu_hole_init,
                  0,
                  0);

bool amd_iommu_v2_supported(void)
{
        return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);