/* Version 1 of 2: arch/ia64/include/asm/dma-mapping.h as of Linux v3.15 */
  1#ifndef _ASM_IA64_DMA_MAPPING_H
  2#define _ASM_IA64_DMA_MAPPING_H
  3
  4/*
  5 * Copyright (C) 2003-2004 Hewlett-Packard Co
  6 *	David Mosberger-Tang <davidm@hpl.hp.com>
  7 */
  8#include <asm/machvec.h>
  9#include <linux/scatterlist.h>
 10#include <asm/swiotlb.h>
 11#include <linux/dma-debug.h>
 12
 13#define ARCH_HAS_DMA_GET_REQUIRED_MASK
 14
 15#define DMA_ERROR_CODE 0
 16
 17extern struct dma_map_ops *dma_ops;
 18extern struct ia64_machine_vector ia64_mv;
 19extern void set_iommu_machvec(void);
 20
 21extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
 22				    enum dma_data_direction);
 23extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 24				enum dma_data_direction);
 25
 26#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
 27
 28static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 29				    dma_addr_t *daddr, gfp_t gfp,
 30				    struct dma_attrs *attrs)
 31{
 32	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 33	void *caddr;
 34
 35	caddr = ops->alloc(dev, size, daddr, gfp, attrs);
 36	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
 37	return caddr;
 38}
 39
 40#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
 41
 42static inline void dma_free_attrs(struct device *dev, size_t size,
 43				  void *caddr, dma_addr_t daddr,
 44				  struct dma_attrs *attrs)
 45{
 46	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 47	debug_dma_free_coherent(dev, size, caddr, daddr);
 48	ops->free(dev, size, caddr, daddr, attrs);
 49}
 50
 51#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 52#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 53
 54#define get_dma_ops(dev) platform_dma_get_ops(dev)
 55
 56#include <asm-generic/dma-mapping-common.h>
 57
 58static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
 59{
 60	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 61	debug_dma_mapping_error(dev, daddr);
 62	return ops->mapping_error(dev, daddr);
 63}
 64
 65static inline int dma_supported(struct device *dev, u64 mask)
 66{
 67	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 68	return ops->dma_supported(dev, mask);
 69}
 70
 71static inline int
 72dma_set_mask (struct device *dev, u64 mask)
 73{
 74	if (!dev->dma_mask || !dma_supported(dev, mask))
 75		return -EIO;
 76	*dev->dma_mask = mask;
 77	return 0;
 78}
 79
 80static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 81{
 82	if (!dev->dma_mask)
 83		return 0;
 84
 85	return addr + size - 1 <= *dev->dma_mask;
 86}
 87
/*
 * Convert a physical address to a DMA (bus) address.  On this path the
 * mapping is the identity: the DMA address equals the physical address.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}
 92
/*
 * Convert a DMA (bus) address back to a physical address.  Inverse of
 * phys_to_dma(); likewise an identity mapping here.
 */
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}
 97
/*
 * Synchronize CPU caches for a buffer used with streaming DMA.
 * @vaddr/@size identify the buffer, @dir the transfer direction; none
 * of them are needed here because no per-buffer maintenance is done.
 */
static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}
108
109#endif /* _ASM_IA64_DMA_MAPPING_H */
/* Version 2 of 2: the same header as of Linux v4.10.11 */
 1#ifndef _ASM_IA64_DMA_MAPPING_H
 2#define _ASM_IA64_DMA_MAPPING_H
 3
 4/*
 5 * Copyright (C) 2003-2004 Hewlett-Packard Co
 6 *	David Mosberger-Tang <davidm@hpl.hp.com>
 7 */
 8#include <asm/machvec.h>
 9#include <linux/scatterlist.h>
10#include <asm/swiotlb.h>
11#include <linux/dma-debug.h>
12
13#define ARCH_HAS_DMA_GET_REQUIRED_MASK
14
15#define DMA_ERROR_CODE 0
16
17extern struct dma_map_ops *dma_ops;
18extern struct ia64_machine_vector ia64_mv;
19extern void set_iommu_machvec(void);
20
21extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
22				    enum dma_data_direction);
23extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
24				enum dma_data_direction);
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26#define get_dma_ops(dev) platform_dma_get_ops(dev)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
28static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
29{
30	if (!dev->dma_mask)
31		return 0;
32
33	return addr + size - 1 <= *dev->dma_mask;
34}
35
/*
 * Convert a physical address to a DMA (bus) address.  On this path the
 * mapping is the identity: the DMA address equals the physical address.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}
40
/*
 * Convert a DMA (bus) address back to a physical address.  Inverse of
 * phys_to_dma(); likewise an identity mapping here.
 */
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}
45
/*
 * Synchronize CPU caches for a buffer used with streaming DMA.
 * @vaddr/@size identify the buffer, @dir the transfer direction; none
 * of them are needed here because no per-buffer maintenance is done.
 */
static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}
56
57#endif /* _ASM_IA64_DMA_MAPPING_H */