/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
extern const struct dma_map_ops *dma_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return dma_ops;
}
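
/*
 * For context (an illustrative sketch, not part of this file): the generic
 * DMA layer of this era resolves a device's ops roughly as below, falling
 * back to the architecture hook above when no per-device ops are installed:
 *
 *	static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 *	{
 *		if (dev && dev->dma_ops)
 *			return dev->dma_ops;
 *		return get_arch_dma_ops(dev ? dev->bus : NULL);
 *	}
 */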

#endif /* _ASM_IA64_DMA_MAPPING_H */
#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#define DMA_ERROR_CODE	0

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	void *caddr;

	caddr = ops->alloc_coherent(dev, size, daddr, gfp);
	/* Report the allocation to the DMA-API debugging layer. */
	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
	return caddr;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);

	debug_dma_free_coherent(dev, size, caddr, daddr);
	ops->free_coherent(dev, size, caddr, daddr);
}
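
/*
 * Usage sketch (hypothetical driver code, not from this file): the two
 * wrappers above are meant to be paired, e.g. for a 4 KiB buffer:
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr = dma_alloc_coherent(dev, 4096, &bus_addr, GFP_KERNEL);
 *
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, 4096, cpu_addr, bus_addr);
 */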

#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)

#define get_dma_ops(dev)	platform_dma_get_ops(dev)

#include <asm-generic/dma-mapping-common.h>

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);

	return ops->mapping_error(dev, daddr);
}
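
/*
 * Usage sketch (hypothetical driver code): streaming mappings created via
 * the helpers from <asm-generic/dma-mapping-common.h> above should be
 * checked with dma_mapping_error() before the address is handed to
 * hardware:
 *
 *	dma_addr_t daddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, daddr))
 *		return -ENOMEM;
 */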

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);

	return ops->dma_supported(dev, mask);
}

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
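
/*
 * Usage sketch (hypothetical probe code): drivers conventionally try a wide
 * mask first and fall back to a narrower one; DMA_BIT_MASK() is assumed
 * from <linux/dma-mapping.h>:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */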

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	/* The buffer is addressable if its last byte fits under the mask. */
	return addr + size - 1 <= *dev->dma_mask;
}
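
/*
 * Worked example (illustrative): with a 32-bit mask, a 0x1000-byte buffer
 * at 0xfffff000 ends at 0xffffffff and is capable, while the same buffer
 * at 0xfffff001 would end at 0x100000000 and exceed the mask.
 */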

/*
 * ia64 has no offset between CPU physical addresses and DMA (bus)
 * addresses, so both conversions are the identity.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}

#endif /* _ASM_IA64_DMA_MAPPING_H */