// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/iommu.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/amd-iommu.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>

#include <xen/xen.h>
#include <xen/swiotlb-xen.h>

static bool disable_dac_quirk __read_mostly;

const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

#ifdef CONFIG_SWIOTLB
bool x86_swiotlb_enable;
static unsigned int x86_swiotlb_flags;

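/*
 * Decide early whether swiotlb bounce buffering is needed: either because
 * there is memory above the 32-bit DMA limit, or because memory encryption
 * prevents devices from addressing guest/host memory directly.
 */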
static void __init pci_swiotlb_detect(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
		x86_swiotlb_enable = true;

	/*
	 * Enable swiotlb so that bounce buffers are allocated and used for
	 * devices that can't support DMA to encrypted memory.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		x86_swiotlb_enable = true;

	/*
	 * Guests with guest memory encryption currently perform all DMA
	 * through bounce buffers as the hypervisor can't access arbitrary
	 * VM memory that is not explicitly shared with it.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		x86_swiotlb_enable = true;
		x86_swiotlb_flags |= SWIOTLB_FORCE;
	}
}
#else
static inline void __init pci_swiotlb_detect(void)
{
}
#define x86_swiotlb_flags 0
#endif /* CONFIG_SWIOTLB */

#ifdef CONFIG_SWIOTLB_XEN
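/*
 * Xen PV guests do their DMA through swiotlb-xen: always in the initial
 * domain, and in other domains whenever swiotlb would be used anyway or a
 * PV PCI frontend may hand them physical devices.
 */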
static bool xen_swiotlb_enabled(void)
{
	return xen_initial_domain() || x86_swiotlb_enable ||
		(IS_ENABLED(CONFIG_XEN_PCIDEV_FRONTEND) && xen_pv_pci_possible);
}

static void __init pci_xen_swiotlb_init(void)
{
	if (!xen_swiotlb_enabled())
		return;
	x86_swiotlb_enable = true;
	x86_swiotlb_flags |= SWIOTLB_ANY;
	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
}
#else
static inline void __init pci_xen_swiotlb_init(void)
{
}
#endif /* CONFIG_SWIOTLB_XEN */

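/*
 * Early boot DMA setup: Xen PV domains hand everything to swiotlb-xen;
 * everyone else detects the need for swiotlb and for the GART, AMD and
 * Intel IOMMUs, then initializes swiotlb with the flags gathered above.
 */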
void __init pci_iommu_alloc(void)
{
	if (xen_pv_domain()) {
		pci_xen_swiotlb_init();
		return;
	}
	pci_swiotlb_detect();
	gart_iommu_hole_init();
	amd_iommu_detect();
	detect_intel_iommu();
	swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
}

/*
 * See <Documentation/arch/x86/x86_64/boot-options.rst> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			x86_swiotlb_enable = true;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

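/*
 * Late (rootfs_initcall) stage: let the selected IOMMU implementation
 * finish its setup, then either report that swiotlb is in use or free its
 * bounce buffers.
 */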
static int __init pci_iommu_init(void)
{
	x86_init.iommu.iommu_init();

#ifdef CONFIG_SWIOTLB
	/*
	 * The IOMMU setup above may have turned swiotlb off. If it is still
	 * enabled, report that bounce buffering is in use; otherwise free
	 * the now unneeded bounce buffers.
	 */
	if (x86_swiotlb_enable) {
		pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_print_info();
	} else {
		swiotlb_exit();
	}
#endif

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
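	/* Cap every device behind the bridge at 32-bit (SAC) DMA addressing. */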
	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
	return 0;
}

static void via_no_dac(struct pci_dev *dev)
{
	if (!disable_dac_quirk) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/iommu.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static bool disable_dac_quirk __read_mostly;

const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

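/*
 * IOMMU detection entries are collected at link time between __iommu_table
 * and __iommu_table_end; pci_iommu_alloc() sorts and walks them below.
 */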
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

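/*
 * Walk the detection table: for each entry whose detect() routine reports a
 * hit, mark it detected, run its early_init(), and stop early if the entry
 * carries IOMMU_FINISH_IF_DETECTED.
 */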
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}

/*
 * See <Documentation/x86/x86_64/boot-options.rst> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

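/*
 * After the PCI subsystem is up, run the selected IOMMU implementation's
 * init routine and then the late_init() hook of every detected entry.
 */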
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
	return 0;
}

static void via_no_dac(struct pci_dev *dev)
{
	if (!disable_dac_quirk) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif