// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/iommu.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/amd-iommu.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>

#include <xen/xen.h>
#include <xen/swiotlb-xen.h>

static bool disable_dac_quirk __read_mostly;

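/* Architecture-wide default DMA mapping operations; NULL means dma-direct is used. */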
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

#ifdef CONFIG_SWIOTLB
bool x86_swiotlb_enable;
static unsigned int x86_swiotlb_flags;

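/*
 * Decide whether the software IO TLB (swiotlb) bounce buffer is needed:
 * when there is memory a 32-bit DMA mask cannot reach, or when memory
 * encryption forces DMA through shared bounce buffers.
 */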
static void __init pci_swiotlb_detect(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
		x86_swiotlb_enable = true;

	/*
	 * Enable swiotlb so that bounce buffers are allocated and used for
	 * devices that can't support DMA to encrypted memory.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		x86_swiotlb_enable = true;

	/*
	 * Guests with guest memory encryption currently perform all DMA
	 * through bounce buffers, as the hypervisor can't access arbitrary
	 * VM memory that is not explicitly shared with it.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		x86_swiotlb_enable = true;
		x86_swiotlb_flags |= SWIOTLB_FORCE;
	}
}
#else
static inline void __init pci_swiotlb_detect(void)
{
}
#define x86_swiotlb_flags 0
#endif /* CONFIG_SWIOTLB */

#ifdef CONFIG_SWIOTLB_XEN
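/*
 * xen-swiotlb is needed when running as the initial (privileged) domain,
 * when the native swiotlb would be used anyway, or when PV PCI passthrough
 * via pcifront may be in use.
 */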
static bool xen_swiotlb_enabled(void)
{
	return xen_initial_domain() || x86_swiotlb_enable ||
		(IS_ENABLED(CONFIG_XEN_PCIDEV_FRONTEND) && xen_pv_pci_possible);
}

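/*
 * Install xen-swiotlb for PV domains: SWIOTLB_ANY lets the bounce buffer
 * live anywhere, since xen_swiotlb_fixup() exchanges it for machine-contiguous
 * memory suitable for device DMA.
 */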
static void __init pci_xen_swiotlb_init(void)
{
	if (!xen_swiotlb_enabled())
		return;
	x86_swiotlb_enable = true;
	x86_swiotlb_flags |= SWIOTLB_ANY;
	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
}
#else
static inline void __init pci_xen_swiotlb_init(void)
{
}
#endif /* CONFIG_SWIOTLB_XEN */

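/*
 * Early boot entry point: on Xen PV set up xen-swiotlb, otherwise probe for
 * hardware IOMMUs (GART, AMD-Vi, VT-d) and bring up the swiotlb if needed.
 */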
void __init pci_iommu_alloc(void)
{
	if (xen_pv_domain()) {
		pci_xen_swiotlb_init();
		return;
	}
	pci_swiotlb_detect();
	gart_iommu_hole_init();
	amd_iommu_detect();
	detect_intel_iommu();
	swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
}

/*
 * See <Documentation/arch/x86/x86_64/boot-options.rst> for the iommu kernel
 * parameter documentation.
 */
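/*
 * Options are comma separated, e.g. "iommu=soft,nopanic": "soft" forces use
 * of the swiotlb, "pt" makes pass-through the default IOMMU mode, and every
 * option is also handed to gart_parse_options() for GART-specific handling.
 */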
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			x86_swiotlb_enable = true;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

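/*
 * Late initialization: let the selected IOMMU driver finish setting up, then
 * keep or release the swiotlb bounce buffers depending on whether they are
 * still needed.
 */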
static int __init pci_iommu_init(void)
{
	x86_init.iommu.iommu_init();

#ifdef CONFIG_SWIOTLB
	/* If swiotlb is still enabled report it, otherwise (e.g. an IOMMU turned it off) free the bounce buffers. */
	if (x86_swiotlb_enable) {
		pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_print_info();
	} else {
		swiotlb_exit();
	}
#endif

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

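/*
 * Cap every device behind the bridge to 32-bit bus addresses so that
 * dual-address-cycle (64-bit DAC) transactions are never generated.
 */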
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
	return 0;
}

static void via_no_dac(struct pci_dev *dev)
{
	if (!disable_dac_quirk) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif