#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#define DMA_ERROR_CODE 0

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
                                    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
                                enum dma_data_direction);

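/*
 * dma_alloc_coherent()/dma_free_coherent() are thin wrappers around the
 * *_attrs variants with a NULL attribute set: the actual work is delegated
 * to the machvec-selected dma_map_ops, with dma-debug bookkeeping around
 * each call.
 */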
#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                    dma_addr_t *daddr, gfp_t gfp,
                                    struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = platform_dma_get_ops(dev);
        void *caddr;

        caddr = ops->alloc(dev, size, daddr, gfp, attrs);
        debug_dma_alloc_coherent(dev, size, *daddr, caddr);
        return caddr;
}

#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
                                  void *caddr, dma_addr_t daddr,
                                  struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = platform_dma_get_ops(dev);
        debug_dma_free_coherent(dev, size, caddr, daddr);
        ops->free(dev, size, caddr, daddr, attrs);
}

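/*
 * IA-64 is cache-coherent, so the non-coherent interfaces can simply reuse
 * the coherent allocation paths.
 */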
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

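/*
 * The generic streaming-DMA helpers (dma_map_single(), dma_map_sg(), the
 * dma_sync_* operations, ...) come from asm-generic and dispatch through
 * get_dma_ops(), i.e. through the machvec-selected dma_map_ops.
 */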
#define get_dma_ops(dev) platform_dma_get_ops(dev)

#include <asm-generic/dma-mapping-common.h>

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
        struct dma_map_ops *ops = platform_dma_get_ops(dev);
        return ops->mapping_error(dev, daddr);
}

static inline int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = platform_dma_get_ops(dev);
        return ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}

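/*
 * A device can DMA to [addr, addr + size) only when the range's last byte
 * still falls under its current DMA mask.
 */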
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
        if (!dev->dma_mask)
                return 0;

        return addr + size - 1 <= *dev->dma_mask;
}

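/*
 * Physical and DMA addresses are identical here; no offset is applied in
 * either direction.
 */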
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
        return daddr;
}

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir)
{
        /*
         * IA-64 is cache-coherent, so this is mostly a no-op.  However, we
         * do need to ensure that dma_cache_sync() enforces order, hence the
         * mb().
         */
        mb();
}

#endif /* _ASM_IA64_DMA_MAPPING_H */
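
What follows is not part of the header; it is a minimal usage sketch showing how a driver would exercise these wrappers. The example_setup_dma() helper, the caller-supplied struct device and the buffer size are hypothetical placeholders; dma_set_mask(), dma_alloc_coherent(), dma_map_single(), dma_mapping_error() and the matching free/unmap calls are the interfaces declared above or pulled in via dma-mapping-common.h.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical helper: exercises the coherent and streaming DMA paths. */
static int example_setup_dma(struct device *dev)
{
        const size_t buf_size = 4096;
        dma_addr_t coherent_bus, stream_bus;
        void *coherent_cpu, *stream_buf;
        int ret = 0;

        /* Checks the mask against ops->dma_supported() before storing it. */
        if (dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;

        /* Coherent path: dma_alloc_coherent() -> dma_alloc_attrs() -> ops->alloc(). */
        coherent_cpu = dma_alloc_coherent(dev, buf_size, &coherent_bus, GFP_KERNEL);
        if (!coherent_cpu)
                return -ENOMEM;

        /* Streaming path: dma_map_single() is provided by dma-mapping-common.h. */
        stream_buf = kmalloc(buf_size, GFP_KERNEL);
        if (!stream_buf) {
                ret = -ENOMEM;
                goto free_coherent;
        }
        stream_bus = dma_map_single(dev, stream_buf, buf_size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, stream_bus)) {
                ret = -EIO;
                goto free_buf;
        }

        /* ... program coherent_bus/stream_bus into the device ... */

        dma_unmap_single(dev, stream_bus, buf_size, DMA_TO_DEVICE);
free_buf:
        kfree(stream_buf);
free_coherent:
        dma_free_coherent(dev, buf_size, coherent_cpu, coherent_bus);
        return ret;
}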