1// SPDX-License-Identifier: GPL-2.0
2#include <linux/dma-map-ops.h>
3#include <linux/dma-direct.h>
4#include <linux/iommu.h>
5#include <linux/dmar.h>
6#include <linux/export.h>
7#include <linux/memblock.h>
8#include <linux/gfp.h>
9#include <linux/pci.h>
10#include <linux/amd-iommu.h>
11
12#include <asm/proto.h>
13#include <asm/dma.h>
14#include <asm/iommu.h>
15#include <asm/gart.h>
16#include <asm/x86_init.h>
17
18#include <xen/xen.h>
19#include <xen/swiotlb-xen.h>
20
/* Set by "iommu=usedac"; suppresses the VIA no-DAC quirk at the bottom of this file. */
static bool disable_dac_quirk __read_mostly;

/* Architecture-wide default DMA mapping operations; exported for driver modules. */
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

/* IOMMU debug builds default to the strictest behaviour. */
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

/* Set to 1 by "iommu=off" on the kernel command line (see iommu_setup()). */
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
39
#ifdef CONFIG_SWIOTLB
/* Result of swiotlb detection, consumed by swiotlb_init() in pci_iommu_alloc(). */
bool x86_swiotlb_enable;
static unsigned int x86_swiotlb_flags;
43
44static void __init pci_swiotlb_detect(void)
45{
46 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
47 if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
48 x86_swiotlb_enable = true;
49
50 /*
51 * Set swiotlb to 1 so that bounce buffers are allocated and used for
52 * devices that can't support DMA to encrypted memory.
53 */
54 if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
55 x86_swiotlb_enable = true;
56
57 /*
58 * Guest with guest memory encryption currently perform all DMA through
59 * bounce buffers as the hypervisor can't access arbitrary VM memory
60 * that is not explicitly shared with it.
61 */
62 if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
63 x86_swiotlb_enable = true;
64 x86_swiotlb_flags |= SWIOTLB_FORCE;
65 }
66}
#else
/* CONFIG_SWIOTLB=n: detection is a no-op and no init flags exist. */
static inline void __init pci_swiotlb_detect(void)
{
}
#define x86_swiotlb_flags 0
#endif /* CONFIG_SWIOTLB */
73
74#ifdef CONFIG_SWIOTLB_XEN
75static bool xen_swiotlb_enabled(void)
76{
77 return xen_initial_domain() || x86_swiotlb_enable ||
78 (IS_ENABLED(CONFIG_XEN_PCIDEV_FRONTEND) && xen_pv_pci_possible);
79}
80
/*
 * Set up the Xen-specific swiotlb and DMA ops for PV domains that need
 * them; called instead of the native path from pci_iommu_alloc().
 */
static void __init pci_xen_swiotlb_init(void)
{
	if (!xen_swiotlb_enabled())
		return;
	x86_swiotlb_enable = true;
	/* NOTE(review): SWIOTLB_ANY presumably lifts the pool placement restriction — confirm against swiotlb docs. */
	x86_swiotlb_flags |= SWIOTLB_ANY;
	/* Flags must be final before the pool is created and remapped. */
	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
}
#else
/* CONFIG_SWIOTLB_XEN=n: nothing to set up. */
static inline void __init pci_xen_swiotlb_init(void)
{
}
#endif /* CONFIG_SWIOTLB_XEN */
97
/*
 * Early boot DMA setup: choose the swiotlb/IOMMU configuration before any
 * DMA allocations happen.  Xen PV domains take their own path and skip
 * native IOMMU detection entirely.
 */
void __init pci_iommu_alloc(void)
{
	if (xen_pv_domain()) {
		pci_xen_swiotlb_init();
		return;
	}
	/* All detection must run first so swiotlb_init() sees the final flags. */
	pci_swiotlb_detect();
	gart_iommu_hole_init();
	amd_iommu_detect();
	detect_intel_iommu();
	swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
}
110
/*
 * See <Documentation/arch/x86/x86_64/boot-options.rst> for the iommu kernel
 * parameter documentation.
 *
 * Parses the comma-separated "iommu=" option string.  Returns 0 on full
 * parse, 1 on early stop ("usedac"), -EINVAL if no argument was given.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	/*
	 * Options are matched by prefix, so more than one branch can fire
	 * for a single token (e.g. "forcesac" also matches "force" first);
	 * converting this chain to else-if would change behaviour.
	 */
	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		/* Obsolete options are accepted but warned about, not errors. */
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			/* Opt back in to DAC; parsing stops here. */
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			x86_swiotlb_enable = true;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		/* GART gets a look at every token as well. */
		gart_parse_options(p);

		/* Advance to the next comma-separated option. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
175
176static int __init pci_iommu_init(void)
177{
178 x86_init.iommu.iommu_init();
179
180#ifdef CONFIG_SWIOTLB
181 /* An IOMMU turned us off. */
182 if (x86_swiotlb_enable) {
183 pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
184 swiotlb_print_info();
185 } else {
186 swiotlb_exit();
187 }
188#endif
189
190 return 0;
191}
192/* Must execute after PCI subsystem */
193rootfs_initcall(pci_iommu_init);
194
195#ifdef CONFIG_PCI
196/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
197
198static int via_no_dac_cb(struct pci_dev *pdev, void *data)
199{
200 pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
201 return 0;
202}
203
204static void via_no_dac(struct pci_dev *dev)
205{
206 if (!disable_dac_quirk) {
207 dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
208 pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
209 }
210}
211DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
212 PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
213#endif
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/dma-direct.h>
3#include <linux/dma-debug.h>
4#include <linux/iommu.h>
5#include <linux/dmar.h>
6#include <linux/export.h>
7#include <linux/memblock.h>
8#include <linux/gfp.h>
9#include <linux/pci.h>
10
11#include <asm/proto.h>
12#include <asm/dma.h>
13#include <asm/iommu.h>
14#include <asm/gart.h>
15#include <asm/calgary.h>
16#include <asm/x86_init.h>
17#include <asm/iommu_table.h>
18
/*
 * NOTE(review): everything from the duplicated SPDX header above to the end
 * of the file repeats an OLDER revision of this same source (iommu_table /
 * Calgary era, using the pre-rename bus_dma_mask field).  Two copies of the
 * same symbols (dma_ops, iommu_setup, ...) cannot coexist in one translation
 * unit; the stale copy below should be dropped — confirm against upstream
 * history before removing.
 */
static bool disable_dac_quirk __read_mostly;

const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* Linker-generated table of IOMMU detection routines, sorted below. */
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
39
/*
 * Early boot (stale duplicate revision): sort the linker-generated IOMMU
 * detection table into dependency order, then run each detector until one
 * claims the system.
 */
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		/* NOTE(review): p can never be NULL here; the "p &&" check is redundant. */
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			/* Winner-takes-all detectors stop the scan. */
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
57
/*
 * See <Documentation/x86/x86_64/boot-options.rst> for the iommu kernel
 * parameter documentation.
 *
 * (Stale duplicate revision of iommu_setup() — still using the global
 * "swiotlb" flag and the Calgary IOMMU option.)
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	/*
	 * Options are matched by prefix, so more than one branch can fire
	 * for a single token (e.g. "forcesac" also matches "force" first).
	 */
	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			/* Opt back in to DAC; parsing stops here. */
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		/* Advance to the next comma-separated option. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
127
/* Late init (stale duplicate revision): run late hooks of detected IOMMUs. */
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		/* NOTE(review): p can never be NULL here; the "p &&" check is redundant. */
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
143
144#ifdef CONFIG_PCI
145/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
146
/*
 * pci_walk_bus() callback (stale duplicate revision): clamp one device to
 * 32-bit (SAC) addressing via the old bus_dma_mask field.
 */
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_mask = DMA_BIT_MASK(32);
	return 0;
}
152
/*
 * Quirk (stale duplicate revision): disable 64-bit (DAC) addressing behind
 * VIA PCI bridges, unless the user opted out with "iommu=usedac".
 */
static void via_no_dac(struct pci_dev *dev)
{
	if (!disable_dac_quirk) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
162#endif