v6.2 (kernel/dma/direct.h)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);

	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (is_swiotlb_force_bounce(dev)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		return swiotlb_map(dev, phys, size, dir, attrs);
	}

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		if (is_swiotlb_active(dev))
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */
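
None of the helpers above are called by drivers directly; they sit behind the
generic DMA API in kernel/dma/mapping.c, which falls through to dma-direct
whenever the device has no dma_map_ops (i.e. no IOMMU implementation)
attached. A simplified sketch of that dispatch, abbreviated from the real
dma_map_page_attrs() with its debug hooks and error checking omitted:

/*
 * Sketch only, not the verbatim kernel function: dma_map_page_attrs()
 * selects the dma-direct path when no dma_map_ops are installed.
 */
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		/* no IOMMU: the DMA address is the translated physical address */
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}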
v6.8
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
bool dma_direct_all_ram_mapped(struct device *dev);
size_t dma_direct_max_mapping_size(struct device *dev);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);

	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (is_swiotlb_force_bounce(dev)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		return swiotlb_map(dev, phys, size, dir, attrs);
	}

	if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
	    dma_kmalloc_needs_bounce(dev, size, dir)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		if (is_swiotlb_active(dev))
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */
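
Relative to v6.2, this version differs in two places. First, it declares
dma_direct_all_ram_mapped(), which the core uses to check whether the direct
mapping (dev->dma_range_map) covers all of system RAM when evaluating
dma_addressing_limited(). Second, dma_direct_map_page() now also bounces
through SWIOTLB when dma_kmalloc_needs_bounce() reports that a small
kmalloc() buffer is not aligned to the DMA cache-line size on a non-coherent
device (CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC). A rough sketch of that
predicate, as declared in include/linux/dma-map-ops.h (paraphrased, not a
verbatim copy):

/*
 * Sketch: bouncing is needed only when cache invalidation could corrupt
 * neighbouring data, i.e. the device is non-coherent, the transfer may
 * write to the buffer (not DMA_TO_DEVICE), and the kmalloc() allocation
 * is smaller than the cache-line alignment DMA requires.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
		enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}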