v5.4
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/iommu.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static bool disable_dac_quirk __read_mostly;

const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
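
/*
 * Illustrative note, not part of the upstream file: the __iommu_table
 * entries walked above are registered by the individual IOMMU drivers
 * (GART, Calgary, AMD, Intel VT-d, swiotlb) through the IOMMU_INIT*()
 * macros from <asm/iommu_table.h>.  sort_iommu_table() orders the entries
 * by their declared dependencies, a detect() callback returning > 0 marks
 * its hardware as present, and early_init() is then run for that entry.
 */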

/*
 * See <Documentation/x86/x86_64/boot-options.rst> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_mask = DMA_BIT_MASK(32);
	return 0;
}

static void via_no_dac(struct pci_dev *dev)
{
	if (!disable_dac_quirk) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif
v6.2
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/iommu.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/amd-iommu.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>

#include <xen/xen.h>
#include <xen/swiotlb-xen.h>

static bool disable_dac_quirk __read_mostly;

const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

#ifdef CONFIG_SWIOTLB
bool x86_swiotlb_enable;
static unsigned int x86_swiotlb_flags;

static void __init pci_swiotlb_detect(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
		x86_swiotlb_enable = true;

	/*
	 * Set swiotlb to 1 so that bounce buffers are allocated and used for
	 * devices that can't support DMA to encrypted memory.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		x86_swiotlb_enable = true;

	/*
	 * Guests with guest memory encryption currently perform all DMA
	 * through bounce buffers as the hypervisor can't access arbitrary
	 * VM memory that is not explicitly shared with it.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		x86_swiotlb_enable = true;
		x86_swiotlb_flags |= SWIOTLB_FORCE;
	}
}
#else
static inline void __init pci_swiotlb_detect(void)
{
}
#define x86_swiotlb_flags 0
#endif /* CONFIG_SWIOTLB */

#ifdef CONFIG_SWIOTLB_XEN
static void __init pci_xen_swiotlb_init(void)
{
	if (!xen_initial_domain() && !x86_swiotlb_enable)
		return;
	x86_swiotlb_enable = true;
	x86_swiotlb_flags |= SWIOTLB_ANY;
	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
}

int pci_xen_swiotlb_init_late(void)
{
	if (dma_ops == &xen_swiotlb_dma_ops)
		return 0;

	/* we can work with the default swiotlb */
	if (!io_tlb_default_mem.nslabs) {
		int rc = swiotlb_init_late(swiotlb_size_or_default(),
					   GFP_KERNEL, xen_swiotlb_fixup);
		if (rc < 0)
			return rc;
	}

	/* XXX: this switches the dma ops under live devices! */
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
	return 0;
}
EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
#else
static inline void __init pci_xen_swiotlb_init(void)
{
}
#endif /* CONFIG_SWIOTLB_XEN */

void __init pci_iommu_alloc(void)
{
	if (xen_pv_domain()) {
		pci_xen_swiotlb_init();
		return;
	}
	pci_swiotlb_detect();
	gart_iommu_hole_init();
	amd_iommu_detect();
	detect_intel_iommu();
	swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
}
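
/*
 * Illustrative note, not part of the upstream file: pci_iommu_alloc() hands
 * Xen PV domains straight to swiotlb-xen and returns; for everything else it
 * decides whether swiotlb is needed, reserves the GART aperture hole, probes
 * the AMD and Intel IOMMUs, and finally lets swiotlb_init() allocate bounce
 * buffers when x86_swiotlb_enable was set.
 */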

/*
 * See <Documentation/x86/x86_64/boot-options.rst> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			x86_swiotlb_enable = true;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
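
/*
 * Illustrative examples, not part of the upstream file: iommu_setup() is
 * wired to the "iommu=" kernel parameter by early_param() above, so booting
 * with e.g. "iommu=off", "iommu=force", "iommu=pt" or "iommu=soft,nomerge"
 * walks the comma-separated list and sets no_iommu, force_iommu, the default
 * passthrough mode or x86_swiotlb_enable accordingly; the full list is in
 * Documentation/x86/x86_64/boot-options.rst.
 */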

static int __init pci_iommu_init(void)
{
	x86_init.iommu.iommu_init();

#ifdef CONFIG_SWIOTLB
	/* An IOMMU turned us off. */
	if (x86_swiotlb_enable) {
		pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_print_info();
	} else {
		swiotlb_exit();
	}
#endif

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
	return 0;
}

static void via_no_dac(struct pci_dev *dev)
{
	if (!disable_dac_quirk) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif