v3.1
/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <asm/iommu.h>
#include <asm/machvec.h>
#include <linux/dma-mapping.h>

#include <asm/system.h>

#ifdef CONFIG_DMAR

#include <linux/kernel.h>

#include <asm/page.h>

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

static int iommu_sac_force __read_mostly;

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int force_iommu __read_mostly = 1;
#else
int force_iommu __read_mostly;
#endif

int iommu_pass_through;

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA mask
   would probably be better, but this is bug-to-bug compatible with i386. */
struct device fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = DMA_BIT_MASK(32),
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

extern struct dma_map_ops intel_dma_ops;

static int __init pci_iommu_init(void)
{
	if (iommu_detected)
		intel_iommu_init();

	return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

void pci_iommu_shutdown(void)
{
	return;
}

void __init
iommu_dma_init(void)
{
	return;
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/* Copied from i386. It doesn't make much sense, because it will
	   only work for pci_alloc_coherent; the caller just has to use
	   GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   The problem with this is that if we overflow the IOMMU area
	   and return a DAC address as a fallback, the device may not
	   handle it correctly.

	   As a special case, some controllers have a 39-bit address
	   mode that is as efficient as 32-bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but it gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %llx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(iommu_dma_supported);

void __init pci_iommu_alloc(void)
{
	/* Use the Intel IOMMU DMA ops, with the machine-vector sync routines. */
	dma_ops = &intel_dma_ops;

	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
	dma_ops->sync_single_for_device = machvec_dma_sync_single;
	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
	dma_ops->dma_supported = iommu_dma_supported;

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons.
	 */
	detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}

#endif
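
For context, here is a minimal sketch of how a PCI driver would exercise the path set up above: on this configuration, dma_set_mask() ends up consulting the dma_supported op installed in pci_iommu_alloc() (i.e. iommu_dma_supported()), and coherent allocations are routed through intel_dma_ops. The probe function, BUF_SIZE, and the error handling below are illustrative assumptions, not part of this file.

/*
 * Hedged illustration only: a hypothetical driver probe routine showing how
 * the iommu_dma_supported() check and intel_dma_ops installed above get used.
 * example_probe and BUF_SIZE are made up for this sketch.
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#define BUF_SIZE 4096

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	dma_addr_t dma_handle;
	void *buf;

	/* dma_set_mask() checks the mask via the dma_supported op,
	   i.e. iommu_dma_supported() on this configuration. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return -EIO;
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return -EIO;

	/* The allocation is routed through intel_dma_ops installed in
	   pci_iommu_alloc(). */
	buf = dma_alloc_coherent(&pdev->dev, BUF_SIZE, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dma_free_coherent(&pdev->dev, BUF_SIZE, buf, dma_handle);
	return 0;
}
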
v4.6
/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <asm/iommu.h>
#include <asm/machvec.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_INTEL_IOMMU

#include <linux/kernel.h>

#include <asm/page.h>

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

static int iommu_sac_force __read_mostly;

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int force_iommu __read_mostly = 1;
#else
int force_iommu __read_mostly;
#endif

int iommu_pass_through;

extern struct dma_map_ops intel_dma_ops;

static int __init pci_iommu_init(void)
{
	if (iommu_detected)
		intel_iommu_init();

	return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

void pci_iommu_shutdown(void)
{
	return;
}

void __init
iommu_dma_init(void)
{
	return;
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/* Copied from i386. It doesn't make much sense, because it will
	   only work for pci_alloc_coherent; the caller just has to use
	   GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   The problem with this is that if we overflow the IOMMU area
	   and return a DAC address as a fallback, the device may not
	   handle it correctly.

	   As a special case, some controllers have a 39-bit address
	   mode that is as efficient as 32-bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but it gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %llx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(iommu_dma_supported);

void __init pci_iommu_alloc(void)
{
	/* Use the Intel IOMMU DMA ops, with the machine-vector sync routines. */
	dma_ops = &intel_dma_ops;

	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
	dma_ops->sync_single_for_device = machvec_dma_sync_single;
	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
	dma_ops->dma_supported = iommu_dma_supported;

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons.
	 */
	detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}

#endif
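
The SAC/DAC decision in iommu_dma_supported() comes down to two mask thresholds: anything below a 24-bit mask is refused outright, and with iommu_sac_force set, masks of 40 bits or more are also refused so the driver retries with a 32-bit (SAC) mask. The stand-alone snippet below is an illustration only; it reimplements those checks in user space rather than calling any kernel code.

/* Illustration only: user-space reimplementation of the mask checks in
 * iommu_dma_supported(). Not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static const char *verdict(uint64_t mask, int iommu_sac_force)
{
	if (mask < DMA_BIT_MASK(24))
		return "unsupported (below the 24-bit ISA-style minimum)";
	if (iommu_sac_force && mask >= DMA_BIT_MASK(40))
		return "refused, so the driver falls back to a 32-bit (SAC) mask";
	return "supported";
}

int main(void)
{
	const unsigned bits[] = { 16, 24, 32, 39, 40, 64 };

	for (unsigned i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
		printf("%2u-bit mask, sac_force=1: %s\n",
		       bits[i], verdict(DMA_BIT_MASK(bits[i]), 1));
	return 0;
}
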