/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

extern const struct dma_map_ops arm_dma_ops;
extern const struct dma_map_ops arm_coherent_dma_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
		return &arm_dma_ops;
	return NULL;
}

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long pfn = __bus_to_pfn(addr);

	if (dev)
		pfn += dev->dma_pfn_offset;

	return pfn;
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	if (dev) {
		unsigned long pfn = dma_to_pfn(dev, addr);

		return phys_to_virt(__pfn_to_phys(pfn));
	}

	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	if (dev)
		return pfn_to_dma(dev, virt_to_pfn(addr));

	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
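
/*
 * A platform with a non-linear PFN <-> bus translation can define the
 * __arch_* hooks in its mach/memory.h before this header is included.
 * A minimal sketch, assuming a hypothetical constant bus offset
 * (EXAMPLE_BUS_OFFSET is illustrative, not a real platform constant):
 *
 *	#define __arch_pfn_to_dma(dev, pfn)				\
 *		((dma_addr_t)__pfn_to_phys(pfn) - EXAMPLE_BUS_OFFSET)
 *	#define __arch_dma_to_pfn(dev, addr)				\
 *		__phys_to_pfn((addr) + EXAMPLE_BUS_OFFSET)
 *	#define __arch_dma_to_virt(dev, addr)				\
 *		phys_to_virt((addr) + EXAMPLE_BUS_OFFSET)
 *	#define __arch_virt_to_dma(dev, addr)				\
 *		((dma_addr_t)virt_to_phys(addr) - EXAMPLE_BUS_OFFSET)
 */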

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @gfp: GFP flags for the allocation
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages and returns the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, unsigned long attrs);
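
/*
 * Drivers never call arm_dma_alloc() directly; it is reached through the
 * dma_map_ops .alloc hook. A minimal sketch of the driver-facing path
 * ("dev" is the driver's struct device, BUF_SIZE is an illustrative
 * constant):
 *
 *	dma_addr_t handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, BUF_SIZE, &handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	(program "handle" into the device; the CPU accesses cpu_addr)
 */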

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, unsigned long attrs);
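
/*
 * The matching driver-side release goes through dma_free_coherent(),
 * which dispatches to arm_dma_free(). Sketch, reusing the names from
 * the allocation example above:
 *
 *	dma_free_coherent(dev, BUF_SIZE, cpu_addr, handle);
 */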

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);
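
/*
 * arm_dma_mmap() backs dma_mmap_coherent(); a driver typically reaches
 * it from its ->mmap file operation. A minimal sketch (my_dev and
 * my_buf are illustrative driver state, not part of this API):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_buf.cpu_addr,
 *					 my_buf.handle, my_buf.size);
 *	}
 */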

/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether a buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
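
/*
 * A minimal registration sketch from platform code. The pool sizes and
 * the bounce test are illustrative (a real callback, such as the one
 * used for the SA-1111 DMA bug, rejects addresses the hardware cannot
 * reach):
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size - 1) > *dev->dma_mask;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, SZ_4K, 0, my_needs_bounce);
 */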

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a
 * device that was previously registered with dmabounce_register_dev
 * is removed from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, unsigned long attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, unsigned long attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
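
/*
 * Driver-side sketch of the scatterlist path; dma_map_sg() dispatches
 * to arm_dma_map_sg() on this architecture ("sgt" is an illustrative,
 * already-populated struct sg_table):
 *
 *	int count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *
 *	if (!count)
 *		return -EIO;
 *	(hand the "count" mapped segments to the device, then)
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */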

#endif /* __KERNEL__ */
#endif