#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	else
		return __generic_dma_ops(dev);
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
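
/*
 * Illustrative sketch (not part of this header): a driver's probe routine
 * would typically negotiate its DMA mask before making any mapping calls.
 * "foo_probe" and "pdev" are hypothetical names.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *		...
 *	}
 */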

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;
	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;
	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit, mask;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	if ((addr | (addr + size - 1)) & ~mask)
		return 0;

	return 1;
}
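
/*
 * Worked example of the checks above: with a 24-bit mask,
 * mask = 0x00ffffff, so limit = (mask + 1) & ~mask = 0x01000000 and
 * any transfer larger than 16MiB is rejected. (For a full 32-bit or
 * 64-bit mask, mask + 1 wraps, limit is 0, and the size check is
 * skipped.) The second test then rejects any buffer whose first or
 * last byte lies above the mask: addr = 0x00fff000 with
 * size = 0x2000 fails because addr + size - 1 = 0x01000fff has bits
 * set outside the mask.
 */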

static inline void dma_mark_clean(void *addr, size_t size) { }

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return dma_addr == DMA_ERROR_CODE;
}
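
/*
 * Illustrative sketch (not part of this header): every streaming mapping
 * should be checked before the handle is handed to hardware. "buf" and
 * "len" are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */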

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

extern int dma_supported(struct device *dev, u64 mask);

extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;
	BUG_ON(!ops);

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * during and after this call are invalid.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
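
/*
 * Illustrative sketch (not part of this header): a typical
 * allocate/use/free cycle for a coherent buffer. SZ_4K is the standard
 * kernel size constant; "desc_ring" and "desc_dma" are hypothetical.
 *
 *	dma_addr_t desc_dma;
 *	void *desc_ring = dma_alloc_coherent(dev, SZ_4K, &desc_dma,
 *					     GFP_KERNEL);
 *	if (!desc_ring)
 *		return -ENOMEM;
 *	... program desc_dma into the device, access desc_ring from the CPU ...
 *	dma_free_coherent(dev, SZ_4K, desc_ring, desc_dma);
 */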

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);

static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
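
/*
 * Illustrative sketch (not part of this header): write-combining
 * allocations suit buffers the CPU streams writes into, such as frame
 * buffers. "fb_size" and "fb_dma" are hypothetical.
 *
 *	void *fb = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, fb_size, fb, fb_dma);
 */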

/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256 KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);

/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether a buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
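
/*
 * Illustrative sketch (not part of this header): a platform might
 * register a device whose DMA window covers only the first 64MiB of
 * RAM, bouncing anything that falls outside it. "foo_needs_bounce" is
 * a hypothetical callback.
 *
 *	static int foo_needs_bounce(struct device *dev,
 *				    dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	dmabounce_register_dev(dev, SZ_4K, SZ_64K, foo_needs_bounce);
 */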

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs);

#endif /* __KERNEL__ */
#endif
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}
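
/*
 * Illustrative sketch (not part of this header) of the ownership
 * transitions described above: mapping hands the buffer to the device,
 * syncing for the CPU hands it back. "buf" and "len" are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device writes into the buffer; the CPU must not touch it ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read buf ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */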

extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * during and after this call are invalid.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
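
/*
 * Illustrative sketch (not part of this header): a driver's mmap file
 * operation can export a coherent buffer to user space. "foo_mmap" and
 * the "foo" structure fields are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->vaddr,
 *					 foo->dma, foo->size);
 *	}
 */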

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether a buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
	size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long offset;
	struct page *page;
	dma_addr_t addr;

	BUG_ON(!virt_addr_valid(cpu_addr));
	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
	BUG_ON(!valid_dma_direction(dir));

	page = virt_to_page(cpu_addr);
	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

	return addr;
}

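/*
 * Illustrative sketch (not part of this header): the usual pairing for
 * a one-off transmit. "skb->data" and "skb->len" stand in for any
 * kernel-direct-mapped buffer.
 *
 *	dma_addr_t handle = dma_map_single(dev, skb->data, skb->len,
 *					   DMA_TO_DEVICE);
 *	... start the transfer and wait for its completion ...
 *	dma_unmap_single(dev, handle, skb->len, DMA_TO_DEVICE);
 */
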
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page's streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so. At the
 * next point you give the DMA address back to the card, you must
 * first perform a dma_sync_single_range_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
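
/*
 * Illustrative sketch (not part of this header): peeking at part of a
 * still-mapped receive buffer, then handing it back to the device, as
 * the comment above dma_sync_single_range_for_cpu() describes.
 * "HDR_LEN" is a hypothetical header size.
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, HDR_LEN,
 *				      DMA_FROM_DEVICE);
 *	... inspect the packet header on the CPU ...
 *	dma_sync_single_range_for_device(dev, handle, 0, HDR_LEN,
 *					 DMA_FROM_DEVICE);
 */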

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);

#endif /* __KERNEL__ */
#endif