// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals that are
 * connected to a DMA controller with a DMAengine driver implementation.
 */
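
/*
 * Typical use from a peripheral driver (illustrative sketch only; "dev",
 * "indio_dev" and the "rx" channel name are assumptions, not part of this
 * file):
 *
 *	ret = devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
 *						   IIO_BUFFER_DIRECTION_IN);
 *	if (ret)
 *		return ret;
 *
 * Drivers calling these helpers also need to import the "IIO_DMAENGINE_BUFFER"
 * symbol namespace, since the buffer API is exported there.
 */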

struct dmaengine_buffer {
        struct iio_dma_buffer_queue queue;

        struct dma_chan *chan;
        struct list_head active;

        size_t align;
        size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
        struct iio_buffer *buffer)
{
        return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

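/*
 * DMA completion callback. Removes the block from the list of active
 * transfers, accounts for any residue reported by the DMA controller and
 * hands the block back to the IIO DMA buffer core.
 */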
static void iio_dmaengine_buffer_block_done(void *data,
                const struct dmaengine_result *result)
{
        struct iio_dma_buffer_block *block = data;
        unsigned long flags;

        spin_lock_irqsave(&block->queue->list_lock, flags);
        list_del(&block->head);
        spin_unlock_irqrestore(&block->queue->list_lock, flags);
        block->bytes_used -= result->residue;
        iio_dma_buffer_block_done(block);
}

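/*
 * Submit a block to the DMA controller. Blocks backed by a scatter-gather
 * table (e.g. blocks attached via DMABUF) are translated into an array of DMA
 * vectors and submitted with dmaengine_prep_peripheral_dma_vec(), while
 * regular blocks are submitted as a single contiguous transfer with
 * dmaengine_prep_slave_single(). For regular blocks the transfer size is
 * clamped to the DMA controller's maximum segment size and rounded down to
 * its alignment requirement.
 */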
static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
        struct iio_dma_buffer_block *block)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(&queue->buffer);
        struct dma_async_tx_descriptor *desc;
        enum dma_transfer_direction dma_dir;
        struct scatterlist *sgl;
        struct dma_vec *vecs;
        size_t max_size;
        dma_cookie_t cookie;
        size_t len_total;
        unsigned int i;
        int nents;

        max_size = min(block->size, dmaengine_buffer->max_size);
        max_size = round_down(max_size, dmaengine_buffer->align);

        if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
                dma_dir = DMA_DEV_TO_MEM;
        else
                dma_dir = DMA_MEM_TO_DEV;

        if (block->sg_table) {
                sgl = block->sg_table->sgl;
                nents = sg_nents_for_len(sgl, block->bytes_used);
                if (nents < 0)
                        return nents;

                vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
                if (!vecs)
                        return -ENOMEM;

                len_total = block->bytes_used;

                for (i = 0; i < nents; i++) {
                        vecs[i].addr = sg_dma_address(sgl);
                        vecs[i].len = min(sg_dma_len(sgl), len_total);
                        len_total -= vecs[i].len;

                        sgl = sg_next(sgl);
                }

                desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
                                                         vecs, nents, dma_dir,
                                                         DMA_PREP_INTERRUPT);
                kfree(vecs);
        } else {
                max_size = min(block->size, dmaengine_buffer->max_size);
                max_size = round_down(max_size, dmaengine_buffer->align);

                if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
                        block->bytes_used = max_size;

                if (!block->bytes_used || block->bytes_used > max_size)
                        return -EINVAL;

                desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
                                                   block->phys_addr,
                                                   block->bytes_used,
                                                   dma_dir,
                                                   DMA_PREP_INTERRUPT);
        }
        if (!desc)
                return -ENOMEM;

        desc->callback_result = iio_dmaengine_buffer_block_done;
        desc->callback_param = block;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return dma_submit_error(cookie);

        spin_lock_irq(&dmaengine_buffer->queue.list_lock);
        list_add_tail(&block->head, &dmaengine_buffer->active);
        spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

        dma_async_issue_pending(dmaengine_buffer->chan);

        return 0;
}

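/* Terminate all in-flight transfers and abort the blocks on the active list. */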
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(&queue->buffer);

        dmaengine_terminate_sync(dmaengine_buffer->chan);
        iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

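/*
 * Release callback: frees the queue resources and the buffer itself once the
 * last reference to the buffer has been dropped.
 */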
static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(buf);

        iio_dma_buffer_release(&dmaengine_buffer->queue);
        kfree(dmaengine_buffer);
}

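/*
 * Most operations are handled by the generic IIO DMA buffer infrastructure;
 * only the release callback is DMAengine specific.
 */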
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
        .read = iio_dma_buffer_read,
        .write = iio_dma_buffer_write,
        .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
        .set_length = iio_dma_buffer_set_length,
        .request_update = iio_dma_buffer_request_update,
        .enable = iio_dma_buffer_enable,
        .disable = iio_dma_buffer_disable,
        .data_available = iio_dma_buffer_usage,
        .space_available = iio_dma_buffer_usage,
        .release = iio_dmaengine_buffer_release,

        .enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
        .attach_dmabuf = iio_dma_buffer_attach_dmabuf,
        .detach_dmabuf = iio_dma_buffer_detach_dmabuf,

        .lock_queue = iio_dma_buffer_lock_queue,
        .unlock_queue = iio_dma_buffer_unlock_queue,

        .modes = INDIO_BUFFER_HARDWARE,
        .flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
        .submit = iio_dmaengine_buffer_submit_block,
        .abort = iio_dmaengine_buffer_abort,
};

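/*
 * The length_align_bytes attribute reports the alignment requirement of the
 * DMA controller so that user space can size its buffers accordingly.
 */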
static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(buffer);

        return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
        iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
        &iio_dev_attr_length_align_bytes,
        NULL,
};
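
/*
 * length_align_bytes is exposed as a buffer attribute; user space would
 * typically read it from the buffer directory of the IIO device in sysfs
 * (the exact path depends on the kernel's IIO sysfs layout).
 */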

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
        const char *channel)
{
        struct dmaengine_buffer *dmaengine_buffer;
        unsigned int width, src_width, dest_width;
        struct dma_slave_caps caps;
        struct dma_chan *chan;
        int ret;

        dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
        if (!dmaengine_buffer)
                return ERR_PTR(-ENOMEM);

        chan = dma_request_chan(dev, channel);
        if (IS_ERR(chan)) {
                ret = PTR_ERR(chan);
                goto err_free;
        }

        ret = dma_get_slave_caps(chan, &caps);
        if (ret < 0)
                goto err_release;

        /* Needs to be aligned to the maximum of the minimums */
        if (caps.src_addr_widths)
                src_width = __ffs(caps.src_addr_widths);
        else
                src_width = 1;
        if (caps.dst_addr_widths)
                dest_width = __ffs(caps.dst_addr_widths);
        else
                dest_width = 1;
        width = max(src_width, dest_width);

        INIT_LIST_HEAD(&dmaengine_buffer->active);
        dmaengine_buffer->chan = chan;
        dmaengine_buffer->align = width;
        dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

        iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
                &iio_dmaengine_default_ops);

        dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
        dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

        return &dmaengine_buffer->queue.buffer;

err_release:
        dma_release_channel(chan);
err_free:
        kfree(dmaengine_buffer);
        return ERR_PTR(ret);
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
        struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(buffer);

        iio_dma_buffer_exit(&dmaengine_buffer->queue);
        dma_release_channel(dmaengine_buffer->chan);

        iio_buffer_put(buffer);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");

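/**
 * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer which uses the DMAengine framework for its
 * transfers, sets its direction and attaches it to the IIO device. It also
 * appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the IIO
 * device.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */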
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
                                                  struct iio_dev *indio_dev,
                                                  const char *channel,
                                                  enum iio_buffer_direction dir)
{
        struct iio_buffer *buffer;
        int ret;

        buffer = iio_dmaengine_buffer_alloc(dev, channel);
        if (IS_ERR(buffer))
                return ERR_CAST(buffer);

        indio_dev->modes |= INDIO_BUFFER_HARDWARE;

        buffer->direction = dir;

        ret = iio_device_attach_buffer(indio_dev, buffer);
        if (ret) {
                iio_dmaengine_buffer_free(buffer);
                return ERR_PTR(ret);
        }

        return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");

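/* devm action callback: releases the buffer when the parent device goes away. */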
static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
        iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This sets up a new DMAengine-based IIO buffer with
 * iio_dmaengine_buffer_setup_ext() and attaches it to the IIO device. It also
 * appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the IIO
 * device. The buffer is automatically freed when the parent device is
 * detached.
 */
int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
                                        struct iio_dev *indio_dev,
                                        const char *channel,
                                        enum iio_buffer_direction dir)
{
        struct iio_buffer *buffer;

        buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
                                        buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("IIO_DMA_BUFFER");