/*
 * Generic streaming DMA mapping helpers.  Each helper looks up the
 * architecture's struct dma_map_ops via get_dma_ops() and forwards the
 * request to it, wrapping every call with dma-debug bookkeeping.
 */
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

/*
 * Map a physically contiguous, kernel-virtual buffer (e.g. from kmalloc())
 * for streaming DMA and return the bus address the device should use.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

/* Tear down a mapping created by dma_map_single_attrs(). */
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

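/*
 * Hypothetical usage sketch (not part of this header): map a kmalloc()ed
 * buffer, hand the bus address to the device, then unmap it once the
 * transfer has completed.  dma_map_single()/dma_unmap_single() are the
 * NULL-attrs wrappers defined at the end of this file; dma_mapping_error()
 * comes from the architecture's DMA headers, and start_device_tx() is a
 * made-up device-specific call.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	start_device_tx(hw, handle, len);
 *	... wait for the device to finish ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
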
/*
 * Map a scatter/gather list for streaming DMA.  Returns the number of DMA
 * segments actually produced, which may be smaller than nents if entries
 * were merged, or 0 on failure.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

/*
 * Unmap a scatterlist previously mapped with dma_map_sg_attrs().  nents must
 * be the value originally passed in, not the number of segments returned.
 */
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

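/*
 * Hypothetical usage sketch (not part of this header): map a scatter/gather
 * list, program each resulting DMA segment into the device, then unmap it.
 * dma_map_sg()/dma_unmap_sg() are the NULL-attrs wrappers defined at the end
 * of this file; program_hw_segment() is a made-up device-specific helper.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_segment(hw, i, sg_dma_address(s), sg_dma_len(s));
 *	... device runs, completion arrives ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
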
/* Map an offset/size range of a page for streaming DMA (no attributes). */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

/* Tear down a mapping created by dma_map_page(). */
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

/*
 * Hand ownership of a streaming mapping back to the CPU so it may safely
 * read or write the buffer.
 */
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

/* Hand ownership of a streaming mapping back to the device. */
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

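/*
 * Hypothetical usage sketch (not part of this header): a long-lived
 * DMA_FROM_DEVICE mapping that the CPU inspects between transfers.  The
 * buffer belongs to the device until dma_sync_single_for_cpu() is called and
 * must be handed back with dma_sync_single_for_device() before the device is
 * allowed to write it again.  process_rx_data() is a made-up consumer.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	process_rx_data(buf, len);
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
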
/* As above, but sync only the sub-range [offset, offset + size) of a mapping. */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

/* Scatterlist analogues of the single-buffer sync helpers. */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

/* Convenience wrappers for the common case of NULL DMA attributes. */
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

#endif /* _ASM_GENERIC_DMA_MAPPING_H */