v3.1: asm-generic DMA mapping helpers
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev, addr + offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);

}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

#endif
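
Taken together, these helpers form the generic front end for streaming DMA: each one validates the transfer direction with BUG_ON(), dispatches through the device's struct dma_map_ops, and mirrors the call into the dma-debug layer. A minimal usage sketch follows; the function, its parameter names, and the error path are hypothetical, and dma_mapping_error() is assumed to be supplied by the architecture's dma-mapping.h, as it was in this era, rather than by this header.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical driver snippet (not part of the header above): stream a
 * kernel buffer to a device.  dma_map_single() expands to
 * dma_map_single_attrs(..., NULL) via the macros at the end of the file.
 */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))	/* arch-provided check */
		return -ENOMEM;

	/* ... program the device with "handle" and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

The map/unmap pair hands buffer ownership to the device and back; between the two calls the CPU must not touch the buffer unless it brackets the access with the dma_sync_single_for_cpu()/dma_sync_single_for_device() helpers above.
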
v3.5.6: asm-generic DMA mapping helpers
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);

}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

#endif
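
Relative to v3.1, this version adds <linux/bug.h> for BUG_ON() and reworks dma_sync_single_range_for_cpu() and dma_sync_single_range_for_device(): rather than delegating to the whole-buffer sync helpers with a biased address, they call the architecture's sync_single_for_{cpu,device} op on the sub-range directly (through a const ops pointer) and report to the dedicated debug_dma_sync_single_range_* hooks, so dma-debug sees the real base address plus offset instead of a synthetic address. A hypothetical sketch of where the range variants are useful; all names below are placeholders, not from this header.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical snippet: a device writes descriptors into one long-lived
 * DMA_FROM_DEVICE mapping of a ring; the CPU syncs and reads only the
 * one descriptor it cares about, then returns that range to the device.
 */
static void example_peek_desc(struct device *dev, dma_addr_t ring,
			      unsigned int idx, size_t desc_sz)
{
	unsigned long off = idx * desc_sz;

	/* make the device's writes to this descriptor visible to the CPU */
	dma_sync_single_range_for_cpu(dev, ring, off, desc_sz,
				      DMA_FROM_DEVICE);

	/* ... read the descriptor on the CPU side ... */

	/* hand ownership of the descriptor back to the device */
	dma_sync_single_range_for_device(dev, ring, off, desc_sz,
					 DMA_FROM_DEVICE);
}

Syncing just the touched sub-range keeps cache maintenance proportional to the data actually accessed, which is the point of the range variants over full-buffer dma_sync_single_for_cpu()/dma_sync_single_for_device().
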