// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample the overhead will be x for each sample, whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * allows much higher sample rates to be achieved than can be sustained with
 * the one sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still be
 *    owned by either the application or the DMA controller at the moment, but
 *    once it has been processed, instead of going to either the incoming or
 *    the outgoing queue the block will be freed.
 *
 * In addition to this, blocks are reference counted and the memory associated
 * with both the block structure as well as the storage memory for the block
 * will be freed when the last reference to the block is dropped. This means a
 * block must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to the actual
 * number of bytes in the buffer. Typically this will be equal to the size of
 * the block, but if the DMA hardware has certain alignment requirements for
 * the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually perform
 * a DMA transfer for the block, e.g. because the buffer was disabled before
 * the block transfer was started. In this case it should set bytes_used to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending and stop active transfers.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */
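
/*
 * Example: a minimal, hypothetical submit() implementation for a driver built
 * on top of dmaengine. This is a sketch only; the foo_* names and the driver
 * private structure are illustrative and not part of this module.
 *
 *	static int foo_buffer_submit(struct iio_dma_buffer_queue *queue,
 *				     struct iio_dma_buffer_block *block)
 *	{
 *		struct foo_buffer *foo = foo_buffer_from_queue(queue);
 *		struct dma_async_tx_descriptor *desc;
 *
 *		desc = dmaengine_prep_slave_single(foo->chan, block->phys_addr,
 *						   block->size, DMA_DEV_TO_MEM,
 *						   DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -ENOMEM;
 *
 *		desc->callback = foo_buffer_block_done;
 *		desc->callback_param = block;
 *
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(foo->chan);
 *
 *		return 0;
 *	}
 */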

static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
		block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DEQUEUED;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/*
	 * If the block has already been freed by the application just drop
	 * the reference, otherwise mark it as done and put it on the
	 * outgoing queue.
	 */
	if (block->state != IIO_BLOCK_STATE_DEAD) {
		block->state = IIO_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
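
/*
 * Example: a hypothetical dmaengine completion callback matching the submit()
 * sketch above (illustrative only). Note that bytes_used must be set before
 * iio_dma_buffer_block_done() is called; here the transfer is assumed to
 * always fill the whole block.
 *
 *	static void foo_buffer_block_done(void *data)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *
 *		block->bytes_used = block->size;
 *		iio_dma_buffer_block_done(block);
 *	}
 */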

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
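
/*
 * Example: a hypothetical abort() implementation, assuming a dmaengine-backed
 * driver that keeps the blocks it has submitted on a foo->active list (both
 * the list and the foo_* names are illustrative):
 *
 *	static void foo_buffer_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct foo_buffer *foo = foo_buffer_from_queue(queue);
 *
 *		dmaengine_terminate_sync(foo->chan);
 *		iio_dma_buffer_block_list_abort(queue, &foo->active);
 *	}
 */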

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_DEQUEUED:
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't re-use it free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be the iio_buffer_access_funcs enable callback.
 *
 * This will submit all queued blocks to the DMA controller and start the
 * DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be the iio_buffer_access_funcs disable callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;

	spin_lock_irq(&queue->list_lock);
	block = list_first_entry_or_null(&queue->outgoing,
		struct iio_dma_buffer_block, head);
	if (block != NULL) {
		list_del(&block->head);
		block->state = IIO_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irq(&queue->list_lock);

	return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length() - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);
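
/*
 * Example: a driver will typically wire the default callbacks provided above
 * into its iio_buffer_access_funcs. This is only a sketch; the exact set of
 * callbacks (and any custom overrides) depends on the driver and kernel
 * version:
 *
 *	static const struct iio_buffer_access_funcs foo_buffer_access_funcs = {
 *		.read = iio_dma_buffer_read,
 *		.data_available = iio_dma_buffer_data_available,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *	};
 */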

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
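
/*
 * Example: hypothetical driver setup tying the sketches above together. The
 * iio_dma_buffer_ops fields (submit, abort) are real; the foo_* names and the
 * dma_dev variable are illustrative only:
 *
 *	static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
 *		.submit = foo_buffer_submit,
 *		.abort = foo_buffer_abort,
 *	};
 *
 *	ret = iio_dma_buffer_init(&foo->queue, dma_dev, &foo_dma_buffer_ops);
 *	if (ret)
 *		return ret;
 */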

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	mutex_lock(&queue->lock);

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");