  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright 2013-2015 Analog Devices Inc.
  4 *  Author: Lars-Peter Clausen <lars@metafoo.de>
  5 */
  6
  7#include <linux/atomic.h>
  8#include <linux/cleanup.h>
  9#include <linux/slab.h>
 10#include <linux/kernel.h>
 11#include <linux/module.h>
 12#include <linux/device.h>
 13#include <linux/workqueue.h>
 14#include <linux/mutex.h>
 15#include <linux/sched.h>
 16#include <linux/poll.h>
 17#include <linux/iio/buffer_impl.h>
 18#include <linux/iio/buffer-dma.h>
 19#include <linux/dma-buf.h>
 20#include <linux/dma-fence.h>
 21#include <linux/dma-mapping.h>
 22#include <linux/sizes.h>
 23
 24/*
 25 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 26 * has its own memory buffer. The size of the block is the granularity at which
 27 * memory is exchanged between the hardware and the application. Increasing the
 28 * basic unit of data exchange from one sample to one block decreases the
 29 * management overhead that is associated with each sample. E.g. if the
 30 * management overhead for one exchange is x and the unit of exchange is one
 31 * sample, the overhead will be x for each sample, whereas when using a block
 32 * which contains n samples the overhead per sample is reduced to x/n. This
 33 * makes it possible to achieve much higher sample rates than can be sustained
 34 * with the one-sample approach.
 35 *
 36 * Blocks are exchanged between the DMA controller and the application by means
 37 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 38 * incoming queue are waiting for the DMA controller to pick them up and fill
 39 * them with data. Blocks on the outgoing queue have been filled with data and
 40 * are waiting for the application to dequeue them and read the data.
 41 *
 42 * A block can be in one of the following states:
 43 *  * Owned by the application. In this state the application can read data from
 44 *    the block.
 45 *  * On the incoming list: Blocks on the incoming list are queued up to be
 46 *    processed by the DMA controller.
 47 *  * Owned by the DMA controller: The DMA controller is processing the block
 48 *    and filling it with data.
 49 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 50 *    processed by the DMA controller and contain data. They can be dequeued by
 51 *    the application.
 52 *  * Dead: A block that is dead has been marked to be freed. It might still
 53 *    be owned by either the application or the DMA controller at the moment,
 54 *    but once the current owner is done processing it, the block will be freed
 55 *    instead of being put on either the incoming or the outgoing queue.
 56 *
 57 * In addition to this, blocks are reference counted and the memory associated
 58 * with both the block structure as well as the storage memory for the block
 59 * will be freed when the last reference to the block is dropped. This means a
 60 * block must not be accessed without holding a reference.
 61 *
 62 * The iio_dma_buffer implementation provides a generic infrastructure for
 63 * managing the blocks.
 64 *
 65 * A driver for a specific piece of hardware that has DMA capabilities needs to
 66 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 67 * callback is supposed to initiate the DMA transfer copying data from the
 68 * converter to the memory region of the block. Once the DMA transfer has been
 69 * completed the driver must call iio_dma_buffer_block_done() for the completed
 70 * block.
 71 *
 72 * Prior to this it must set the bytes_used field of the block to the actual
 73 * number of bytes in the buffer. Typically this will be equal to the
 74 * size of the block, but if the DMA hardware has certain alignment requirements
 75 * for the transfer length it might choose to use less than the full size. In
 76 * either case it is expected that bytes_used is a multiple of the bytes per
 77 * datum, i.e. the block must not contain partial samples.
 78 *
 79 * The driver must call iio_dma_buffer_block_done() for each block it has
 80 * received through its submit() callback, even if it does not actually
 81 * perform a DMA transfer for the block, e.g. because the buffer was disabled
 82 * before the block transfer was started. In this case it should set bytes_used
 83 * to 0.
 84 *
 85 * In addition it is recommended that a driver implements the abort() callback.
 86 * It will be called when the buffer is disabled and can be used to cancel
 87 * pending transfers and to stop active ones.
 88 *
 89 * The specific driver implementation should use the default callback
 90 * implementations provided by this module for the iio_buffer_access_funcs
 91 * struct. It may overload some callbacks with custom variants if the hardware
 92 * has special requirements that are not handled by the generic functions. If a
 93 * driver chooses to overload a callback it has to ensure that the generic
 94 * callback is called from within the custom callback.
 95 */
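
/*
 * To make the contract above concrete, the following is a rough sketch of the
 * driver side, using hypothetical foo_* and to_foo_dma() names; the
 * hardware-specific details and error handling are omitted, so treat it as an
 * illustration of the callback flow rather than a template:
 *
 *	static int foo_dma_submit(struct iio_dma_buffer_queue *queue,
 *				  struct iio_dma_buffer_block *block)
 *	{
 *		struct foo_dma *foo = to_foo_dma(queue);
 *
 *		// Start a transfer into block->phys_addr of up to block->size
 *		// bytes. When the hardware signals completion, the driver's
 *		// completion handler sets block->bytes_used (a multiple of the
 *		// bytes per datum) and calls iio_dma_buffer_block_done(block).
 *		return foo_hw_start_transfer(foo, block);
 *	}
 *
 *	static void foo_dma_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct foo_dma *foo = to_foo_dma(queue);
 *		LIST_HEAD(aborted);
 *
 *		// Stop the hardware and collect the blocks that were still in
 *		// flight; iio_dma_buffer_block_list_abort() sets bytes_used to
 *		// 0 for each of them and hands them back to the queue.
 *		foo_hw_stop_and_collect(foo, &aborted);
 *		iio_dma_buffer_block_list_abort(queue, &aborted);
 *	}
 *
 *	static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
 *		.submit = foo_dma_submit,
 *		.abort = foo_dma_abort,
 *	};
 */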
 96
 97static void iio_buffer_block_release(struct kref *kref)
 98{
 99	struct iio_dma_buffer_block *block = container_of(kref,
100		struct iio_dma_buffer_block, kref);
101	struct iio_dma_buffer_queue *queue = block->queue;
102
103	WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
104
105	if (block->fileio) {
106		dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
107				  block->vaddr, block->phys_addr);
108	} else {
109		atomic_dec(&queue->num_dmabufs);
110	}
111
112	iio_buffer_put(&queue->buffer);
113	kfree(block);
114}
115
116static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
117{
118	kref_get(&block->kref);
119}
120
121static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
122{
123	kref_put(&block->kref, iio_buffer_block_release);
124}
125
126/*
127 * dma_free_coherent can sleep, hence we need to take some special care to be
128 * able to drop a reference from an atomic context.
129 */
130static LIST_HEAD(iio_dma_buffer_dead_blocks);
131static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);
132
133static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
134{
135	struct iio_dma_buffer_block *block, *_block;
136	LIST_HEAD(block_list);
137
138	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
139	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
140	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);
141
142	list_for_each_entry_safe(block, _block, &block_list, head)
143		iio_buffer_block_release(&block->kref);
144}
145static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);
146
147static void iio_buffer_block_release_atomic(struct kref *kref)
148{
149	struct iio_dma_buffer_block *block;
150	unsigned long flags;
151
152	block = container_of(kref, struct iio_dma_buffer_block, kref);
153
154	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
155	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
156	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);
157
158	schedule_work(&iio_dma_buffer_cleanup_work);
159}
160
161/*
162 * Version of iio_buffer_block_put() that can be called from atomic context
163 */
164static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
165{
166	kref_put(&block->kref, iio_buffer_block_release_atomic);
167}
168
169static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
170{
171	return container_of(buf, struct iio_dma_buffer_queue, buffer);
172}
173
174static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
175	struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
176{
177	struct iio_dma_buffer_block *block;
178
179	block = kzalloc(sizeof(*block), GFP_KERNEL);
180	if (!block)
181		return NULL;
182
183	if (fileio) {
184		block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
185						  &block->phys_addr, GFP_KERNEL);
186		if (!block->vaddr) {
187			kfree(block);
188			return NULL;
189		}
190	}
191
192	block->fileio = fileio;
193	block->size = size;
194	block->state = IIO_BLOCK_STATE_DONE;
195	block->queue = queue;
196	INIT_LIST_HEAD(&block->head);
197	kref_init(&block->kref);
198
199	iio_buffer_get(&queue->buffer);
200
201	if (!fileio)
202		atomic_inc(&queue->num_dmabufs);
203
204	return block;
205}
206
207static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
208{
209	if (block->state != IIO_BLOCK_STATE_DEAD)
210		block->state = IIO_BLOCK_STATE_DONE;
211}
212
213static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
214{
215	__poll_t flags;
216
217	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
218		flags = EPOLLIN | EPOLLRDNORM;
219	else
220		flags = EPOLLOUT | EPOLLWRNORM;
221
222	wake_up_interruptible_poll(&queue->buffer.pollq, flags);
223}
224
225/**
226 * iio_dma_buffer_block_done() - Indicate that a block has been completed
227 * @block: The completed block
228 *
229 * Should be called when the DMA controller has finished handling the block to
230 * pass back ownership of the block to the queue.
231 */
232void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
233{
234	struct iio_dma_buffer_queue *queue = block->queue;
235	unsigned long flags;
236	bool cookie;
237
238	cookie = dma_fence_begin_signalling();
239
240	spin_lock_irqsave(&queue->list_lock, flags);
241	_iio_dma_buffer_block_done(block);
242	spin_unlock_irqrestore(&queue->list_lock, flags);
243
244	if (!block->fileio)
245		iio_buffer_signal_dmabuf_done(block->fence, 0);
246
247	iio_buffer_block_put_atomic(block);
248	iio_dma_buffer_queue_wake(queue);
249	dma_fence_end_signalling(cookie);
250}
251EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_done, "IIO_DMA_BUFFER");
252
253/**
254 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
255 *   aborted
256 * @queue: Queue for which to complete blocks.
257 * @list: List of aborted blocks. All blocks in this list must be from @queue.
258 *
259 * Typically called from the abort() callback after the DMA controller has been
260 * stopped. This will set bytes_used to 0 for each block in the list and then
261 * hand the blocks back to the queue.
262 */
263void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
264	struct list_head *list)
265{
266	struct iio_dma_buffer_block *block, *_block;
267	unsigned long flags;
268	bool cookie;
269
270	cookie = dma_fence_begin_signalling();
271
272	spin_lock_irqsave(&queue->list_lock, flags);
273	list_for_each_entry_safe(block, _block, list, head) {
274		list_del(&block->head);
275		block->bytes_used = 0;
276		_iio_dma_buffer_block_done(block);
277
278		if (!block->fileio)
279			iio_buffer_signal_dmabuf_done(block->fence, -EINTR);
280		iio_buffer_block_put_atomic(block);
281	}
282	spin_unlock_irqrestore(&queue->list_lock, flags);
283
284	if (queue->fileio.enabled)
285		queue->fileio.enabled = false;
286
287	iio_dma_buffer_queue_wake(queue);
288	dma_fence_end_signalling(cookie);
289}
290EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_list_abort, "IIO_DMA_BUFFER");
291
292static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
293{
294	/*
295	 * If the core owns the block it can be re-used. This should be the
296	 * default case when enabling the buffer, unless the DMA controller does
297	 * not support abort and has not given back the block yet.
298	 */
299	switch (block->state) {
300	case IIO_BLOCK_STATE_QUEUED:
301	case IIO_BLOCK_STATE_DONE:
302		return true;
303	default:
304		return false;
305	}
306}
307
308static bool iio_dma_buffer_can_use_fileio(struct iio_dma_buffer_queue *queue)
309{
310	/*
311	 * Note that queue->num_dmabufs cannot increase while the queue is
312	 * locked, it can only decrease, so it does not race against
313	 * iio_dma_buffer_alloc_block().
314	 */
315	return queue->fileio.enabled || !atomic_read(&queue->num_dmabufs);
316}
317
318/**
319 * iio_dma_buffer_request_update() - DMA buffer request_update callback
320 * @buffer: The buffer for which to request an update
321 *
322 * Should be used as the request_update() callback for the
323 * iio_buffer_access_funcs struct for DMA buffers.
324 */
325int iio_dma_buffer_request_update(struct iio_buffer *buffer)
326{
327	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
328	struct iio_dma_buffer_block *block;
329	bool try_reuse = false;
330	size_t size;
331	int ret = 0;
332	int i;
333
334	/*
335	 * Split the buffer into two even parts. This is used as a double
336	 * buffering scheme where usually one block at a time is used by the
337	 * DMA and the other one by the application.
338	 */
339	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
340		queue->buffer.length, 2);
341
342	mutex_lock(&queue->lock);
343
344	queue->fileio.enabled = iio_dma_buffer_can_use_fileio(queue);
345
346	/* If DMABUFs were created, disable fileio interface */
347	if (!queue->fileio.enabled)
348		goto out_unlock;
349
350	/* Allocations are page aligned */
351	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
352		try_reuse = true;
353
354	queue->fileio.block_size = size;
355	queue->fileio.active_block = NULL;
356
357	spin_lock_irq(&queue->list_lock);
358	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
359		block = queue->fileio.blocks[i];
360
361		/* If we can't re-use it free it */
362		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
363			block->state = IIO_BLOCK_STATE_DEAD;
364	}
365
366	/*
367	 * At this point all blocks are either owned by the core or marked as
368	 * dead. This means we can reset the lists without having to fear
369	 * corruption.
370	 */
371	spin_unlock_irq(&queue->list_lock);
372
373	INIT_LIST_HEAD(&queue->incoming);
374
375	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
376		if (queue->fileio.blocks[i]) {
377			block = queue->fileio.blocks[i];
378			if (block->state == IIO_BLOCK_STATE_DEAD) {
379				/* Could not reuse it */
380				iio_buffer_block_put(block);
381				block = NULL;
382			} else {
383				block->size = size;
384			}
385		} else {
386			block = NULL;
387		}
388
389		if (!block) {
390			block = iio_dma_buffer_alloc_block(queue, size, true);
391			if (!block) {
392				ret = -ENOMEM;
393				goto out_unlock;
394			}
395			queue->fileio.blocks[i] = block;
396		}
397
398		/*
399		 * block->bytes_used may have been modified previously, e.g. by
400		 * iio_dma_buffer_block_list_abort(). Reset it here to the
401		 * block's size so that iio_dma_buffer_io() will work.
402		 */
403		block->bytes_used = block->size;
404
405		/*
406		 * If it's an input buffer, mark the block as queued, and
407		 * iio_dma_buffer_enable() will submit it. Otherwise mark it as
408		 * done, which means it's ready to be dequeued.
409		 */
410		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
411			block->state = IIO_BLOCK_STATE_QUEUED;
412			list_add_tail(&block->head, &queue->incoming);
413		} else {
414			block->state = IIO_BLOCK_STATE_DONE;
415		}
416	}
417
418out_unlock:
419	mutex_unlock(&queue->lock);
420
421	return ret;
422}
423EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_request_update, "IIO_DMA_BUFFER");
424
425static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
426{
427	unsigned int i;
428
429	spin_lock_irq(&queue->list_lock);
430	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
431		if (!queue->fileio.blocks[i])
432			continue;
433		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
434	}
435	spin_unlock_irq(&queue->list_lock);
436
437	INIT_LIST_HEAD(&queue->incoming);
438
439	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
440		if (!queue->fileio.blocks[i])
441			continue;
442		iio_buffer_block_put(queue->fileio.blocks[i]);
443		queue->fileio.blocks[i] = NULL;
444	}
445	queue->fileio.active_block = NULL;
446}
447
448static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
449	struct iio_dma_buffer_block *block)
450{
451	int ret;
452
453	/*
454	 * If the hardware has already been removed we put the block into
455	 * limbo. It will neither be on the incoming nor outgoing list, nor will
456	 * it ever complete. It will just wait to be freed eventually.
457	 */
458	if (!queue->ops)
459		return;
460
461	block->state = IIO_BLOCK_STATE_ACTIVE;
462	iio_buffer_block_get(block);
463
464	ret = queue->ops->submit(queue, block);
465	if (ret) {
466		if (!block->fileio)
467			iio_buffer_signal_dmabuf_done(block->fence, ret);
468
469		/*
470		 * This is a bit of a problem and there is not much we can do
471		 * other than wait for the buffer to be disabled and re-enabled
472		 * and try again. But it should not really happen unless we run
473		 * out of memory or something similar.
474		 *
475		 * TODO: Implement support in the IIO core to allow buffers to
476		 * notify consumers that something went wrong and the buffer
477		 * should be disabled.
478		 */
479		iio_buffer_block_put(block);
480	}
481}
482
483/**
484 * iio_dma_buffer_enable() - Enable DMA buffer
485 * @buffer: IIO buffer to enable
486 * @indio_dev: IIO device the buffer is attached to
487 *
488 * Needs to be called when the device that the buffer is attached to starts
489 * sampling. Typically this should be the iio_buffer_access_funcs enable callback.
490 *
491 * This will allocate the DMA buffers and start the DMA transfers.
492 */
493int iio_dma_buffer_enable(struct iio_buffer *buffer,
494	struct iio_dev *indio_dev)
495{
496	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
497	struct iio_dma_buffer_block *block, *_block;
498
499	mutex_lock(&queue->lock);
500	queue->active = true;
501	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
502		list_del(&block->head);
503		iio_dma_buffer_submit_block(queue, block);
504	}
505	mutex_unlock(&queue->lock);
506
507	return 0;
508}
509EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enable, "IIO_DMA_BUFFER");
510
511/**
512 * iio_dma_buffer_disable() - Disable DMA buffer
513 * @buffer: IIO DMA buffer to disable
514 * @indio_dev: IIO device the buffer is attached to
515 *
516 * Needs to be called when the device that the buffer is attached to stops
517 * sampling. Typically this should be the iio_buffer_access_funcs disable callback.
518 */
519int iio_dma_buffer_disable(struct iio_buffer *buffer,
520	struct iio_dev *indio_dev)
521{
522	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
523
524	mutex_lock(&queue->lock);
525	queue->active = false;
526
527	if (queue->ops && queue->ops->abort)
528		queue->ops->abort(queue);
529	mutex_unlock(&queue->lock);
530
531	return 0;
532}
533EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_disable, "IIO_DMA_BUFFER");
534
535static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
536	struct iio_dma_buffer_block *block)
537{
538	if (block->state == IIO_BLOCK_STATE_DEAD) {
539		iio_buffer_block_put(block);
540	} else if (queue->active) {
541		iio_dma_buffer_submit_block(queue, block);
542	} else {
543		block->state = IIO_BLOCK_STATE_QUEUED;
544		list_add_tail(&block->head, &queue->incoming);
545	}
546}
547
548static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
549	struct iio_dma_buffer_queue *queue)
550{
551	struct iio_dma_buffer_block *block;
552	unsigned int idx;
553
554	spin_lock_irq(&queue->list_lock);
555
556	idx = queue->fileio.next_dequeue;
557	block = queue->fileio.blocks[idx];
558
559	if (block->state == IIO_BLOCK_STATE_DONE) {
560		idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
561		queue->fileio.next_dequeue = idx;
562	} else {
563		block = NULL;
564	}
565
566	spin_unlock_irq(&queue->list_lock);
567
568	return block;
569}
570
571static int iio_dma_buffer_io(struct iio_buffer *buffer, size_t n,
572			     char __user *user_buffer, bool is_from_user)
573{
574	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
575	struct iio_dma_buffer_block *block;
576	void *addr;
577	int ret;
578
579	if (n < buffer->bytes_per_datum)
580		return -EINVAL;
581
582	mutex_lock(&queue->lock);
583
584	if (!queue->fileio.active_block) {
585		block = iio_dma_buffer_dequeue(queue);
586		if (block == NULL) {
587			ret = 0;
588			goto out_unlock;
589		}
590		queue->fileio.pos = 0;
591		queue->fileio.active_block = block;
592	} else {
593		block = queue->fileio.active_block;
594	}
595
596	n = rounddown(n, buffer->bytes_per_datum);
597	if (n > block->bytes_used - queue->fileio.pos)
598		n = block->bytes_used - queue->fileio.pos;
599	addr = block->vaddr + queue->fileio.pos;
600
601	if (is_from_user)
602		ret = copy_from_user(addr, user_buffer, n);
603	else
604		ret = copy_to_user(user_buffer, addr, n);
605	if (ret) {
606		ret = -EFAULT;
607		goto out_unlock;
608	}
609
610	queue->fileio.pos += n;
611
612	if (queue->fileio.pos == block->bytes_used) {
613		queue->fileio.active_block = NULL;
614		iio_dma_buffer_enqueue(queue, block);
615	}
616
617	ret = n;
618
619out_unlock:
620	mutex_unlock(&queue->lock);
621
622	return ret;
623}
624
625/**
626 * iio_dma_buffer_read() - DMA buffer read callback
627 * @buffer: Buffer to read from
628 * @n: Number of bytes to read
629 * @user_buffer: Userspace buffer to copy the data to
630 *
631 * Should be used as the read callback for the iio_buffer_access_funcs
632 * struct for DMA buffers.
633 */
634int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
635			char __user *user_buffer)
636{
637	return iio_dma_buffer_io(buffer, n, user_buffer, false);
638}
639EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_read, "IIO_DMA_BUFFER");
640
641/**
642 * iio_dma_buffer_write() - DMA buffer write callback
643 * @buffer: Buffer to write to
644 * @n: Number of bytes to write
645 * @user_buffer: Userspace buffer to copy the data from
646 *
647 * Should be used as the write callback for the iio_buffer_access_funcs
648 * struct for DMA buffers.
649 */
650int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
651			 const char __user *user_buffer)
652{
653	return iio_dma_buffer_io(buffer, n,
654				 (__force __user char *)user_buffer, true);
655}
656EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_write, "IIO_DMA_BUFFER");
657
658/**
659 * iio_dma_buffer_usage() - DMA buffer data_available and
660 * space_available callback
661 * @buf: Buffer to check for available data or space
662 *
663 * Should be used as the data_available and space_available callbacks for
665 * the iio_buffer_access_funcs struct for DMA buffers.
665 */
666size_t iio_dma_buffer_usage(struct iio_buffer *buf)
667{
668	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
669	struct iio_dma_buffer_block *block;
670	size_t data_available = 0;
671	unsigned int i;
672
673	/*
674	 * For counting the available bytes we'll use the size of the block not
675	 * the number of actual bytes available in the block. Otherwise it is
676	 * possible that we end up with a value that is lower than the watermark
677	 * but won't increase since all blocks are in use.
678	 */
679
680	mutex_lock(&queue->lock);
681	if (queue->fileio.active_block)
682		data_available += queue->fileio.active_block->size;
683
684	spin_lock_irq(&queue->list_lock);
685
686	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
687		block = queue->fileio.blocks[i];
688
689		if (block != queue->fileio.active_block
690		    && block->state == IIO_BLOCK_STATE_DONE)
691			data_available += block->size;
692	}
693
694	spin_unlock_irq(&queue->list_lock);
695	mutex_unlock(&queue->lock);
696
697	return data_available;
698}
699EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_usage, "IIO_DMA_BUFFER");
700
701struct iio_dma_buffer_block *
702iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
703			     struct dma_buf_attachment *attach)
704{
705	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
706	struct iio_dma_buffer_block *block;
707
708	guard(mutex)(&queue->lock);
709
710	/*
711	 * If the buffer is enabled and in fileio mode new blocks can't be
712	 * allocated.
713	 */
714	if (queue->fileio.enabled)
715		return ERR_PTR(-EBUSY);
716
717	block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false);
718	if (!block)
719		return ERR_PTR(-ENOMEM);
720
721	/* Free memory that might be in use for fileio mode */
722	iio_dma_buffer_fileio_free(queue);
723
724	return block;
725}
726EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_attach_dmabuf, "IIO_DMA_BUFFER");
727
728void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
729				  struct iio_dma_buffer_block *block)
730{
731	block->state = IIO_BLOCK_STATE_DEAD;
732	iio_buffer_block_put_atomic(block);
733}
734EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_detach_dmabuf, "IIO_DMA_BUFFER");
735
736static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
737{
738	struct iio_dma_buffer_queue *queue = block->queue;
739
740	/* If in fileio mode buffers can't be enqueued. */
741	if (queue->fileio.enabled)
742		return -EBUSY;
743
744	switch (block->state) {
745	case IIO_BLOCK_STATE_QUEUED:
746		return -EPERM;
747	case IIO_BLOCK_STATE_ACTIVE:
748	case IIO_BLOCK_STATE_DEAD:
749		return -EBUSY;
750	case IIO_BLOCK_STATE_DONE:
751		break;
752	}
753
754	return 0;
755}
756
757int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
758				  struct iio_dma_buffer_block *block,
759				  struct dma_fence *fence,
760				  struct sg_table *sgt,
761				  size_t size, bool cyclic)
762{
763	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
764	bool cookie;
765	int ret;
766
767	WARN_ON(!mutex_is_locked(&queue->lock));
768
769	cookie = dma_fence_begin_signalling();
770
771	ret = iio_dma_can_enqueue_block(block);
772	if (ret < 0)
773		goto out_end_signalling;
774
775	block->bytes_used = size;
776	block->cyclic = cyclic;
777	block->sg_table = sgt;
778	block->fence = fence;
779
780	iio_dma_buffer_enqueue(queue, block);
781
782out_end_signalling:
783	dma_fence_end_signalling(cookie);
784
785	return ret;
786}
787EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enqueue_dmabuf, "IIO_DMA_BUFFER");
788
789void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
790{
791	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
792
793	mutex_lock(&queue->lock);
794}
795EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_lock_queue, "IIO_DMA_BUFFER");
796
797void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
798{
799	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
800
801	mutex_unlock(&queue->lock);
802}
803EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_unlock_queue, "IIO_DMA_BUFFER");
804
805/**
806 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
807 * @buffer: Buffer to set the bytes-per-datum for
808 * @bpd: The new bytes-per-datum value
809 *
810 * Should be used as the set_bytes_per_datum callback for the iio_buffer_access_funcs
811 * struct for DMA buffers.
812 */
813int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
814{
815	buffer->bytes_per_datum = bpd;
816
817	return 0;
818}
819EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_bytes_per_datum, "IIO_DMA_BUFFER");
820
821/**
822 * iio_dma_buffer_set_length - DMA buffer set_length callback
823 * @buffer: Buffer to set the length for
824 * @length: The new buffer length
825 *
826 * Should be used as the set_length callback for the iio_buffer_access_funcs
827 * struct for DMA buffers.
828 */
829int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
830{
831	/* Avoid an invalid state */
832	if (length < 2)
833		length = 2;
834	buffer->length = length;
835	buffer->watermark = length / 2;
836
837	return 0;
838}
839EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_length, "IIO_DMA_BUFFER");
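
/*
 * The exported helpers above are intended to be wired into a buffer's
 * iio_buffer_access_funcs table. The sketch below shows one plausible way to
 * do that; the exact set of fields and the .modes/.flags values follow typical
 * users such as the DMAengine based buffer and should be treated as an
 * illustration, not a complete or authoritative template:
 *
 *	static const struct iio_buffer_access_funcs foo_dma_buffer_access_funcs = {
 *		.read = iio_dma_buffer_read,
 *		.write = iio_dma_buffer_write,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_usage,
 *		.space_available = iio_dma_buffer_usage,
 *
 *		.modes = INDIO_BUFFER_HARDWARE,
 *		.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
 *	};
 */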
840
841/**
842 * iio_dma_buffer_init() - Initialize DMA buffer queue
843 * @queue: Buffer to initialize
844 * @dev: DMA device
845 * @ops: DMA buffer queue callback operations
846 *
847 * The DMA device will be used by the queue to do DMA memory allocations, so it
848 * should refer to the device that will perform the DMA to ensure that
849 * allocations are done from a memory region that can be accessed by the device.
850 */
851int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
852	struct device *dev, const struct iio_dma_buffer_ops *ops)
853{
854	iio_buffer_init(&queue->buffer);
855	queue->buffer.length = PAGE_SIZE;
856	queue->buffer.watermark = queue->buffer.length / 2;
857	queue->dev = dev;
858	queue->ops = ops;
859
860	INIT_LIST_HEAD(&queue->incoming);
861
862	mutex_init(&queue->lock);
863	spin_lock_init(&queue->list_lock);
864
865	return 0;
866}
867EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_init, "IIO_DMA_BUFFER");
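
/*
 * A minimal sketch of tying things together at setup time, reusing the
 * hypothetical foo_dma_buffer_ops and foo_dma_buffer_access_funcs tables from
 * the sketches above. Error unwinding and the surrounding driver structure
 * are omitted:
 *
 *	struct foo_dma_buffer {
 *		struct iio_dma_buffer_queue queue;
 *		// hardware specific state
 *	};
 *
 *	static int foo_dma_buffer_setup(struct foo_dma_buffer *buf,
 *					struct device *dma_dev)
 *	{
 *		int ret;
 *
 *		// dma_dev must be the device that actually performs the DMA,
 *		// since it is used for the coherent memory allocations.
 *		ret = iio_dma_buffer_init(&buf->queue, dma_dev,
 *					  &foo_dma_buffer_ops);
 *		if (ret)
 *			return ret;
 *
 *		buf->queue.buffer.access = &foo_dma_buffer_access_funcs;
 *
 *		return 0;
 *	}
 */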
868
869/**
870 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
871 * @queue: Buffer to cleanup
872 *
873 * After this function has completed it is safe to free any resources that are
874 * associated with the buffer and are accessed inside the callback operations.
875 */
876void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
877{
878	mutex_lock(&queue->lock);
879
880	iio_dma_buffer_fileio_free(queue);
881	queue->ops = NULL;
882
883	mutex_unlock(&queue->lock);
884}
885EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_exit, "IIO_DMA_BUFFER");
886
887/**
888 * iio_dma_buffer_release() - Release final buffer resources
889 * @queue: Buffer to release
890 *
891 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
892 * called in the buffer's release callback implementation right before freeing
893 * the memory associated with the buffer.
894 */
895void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
896{
897	mutex_destroy(&queue->lock);
898}
899EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_release, "IIO_DMA_BUFFER");
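
/*
 * Teardown happens in two steps, mirroring the setup sketch above:
 * iio_dma_buffer_exit() is called while the hardware is being removed, and
 * iio_dma_buffer_release() runs from the buffer's release callback right
 * before the buffer memory is freed. A hypothetical sketch:
 *
 *	static void foo_dma_buffer_remove(struct foo_dma_buffer *buf)
 *	{
 *		// After this returns, the submit()/abort() callbacks and any
 *		// resources they use will no longer be accessed.
 *		iio_dma_buffer_exit(&buf->queue);
 *	}
 *
 *	static void foo_dma_buffer_release_cb(struct iio_buffer *buffer)
 *	{
 *		struct foo_dma_buffer *buf = to_foo_dma_buffer(buffer);
 *
 *		iio_dma_buffer_release(&buf->queue);
 *		kfree(buf);
 *	}
 */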
900
901MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
902MODULE_DESCRIPTION("DMA buffer for the IIO framework");
903MODULE_LICENSE("GPL v2");