/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals that are
 * connected to a DMA controller which has a DMAengine driver implementation.
 */
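
/*
 * Data flow: the generic DMA buffer core hands blocks to the submit()
 * callback below, which maps each block onto a single DMAengine slave
 * transfer and queues it on the channel. When a transfer completes, the
 * DMAengine completion callback removes the block from the list of active
 * blocks and hands it back to the core via iio_dma_buffer_block_done().
 */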

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;		/* DMAengine channel used for the transfers */
	struct list_head active;	/* blocks currently queued on the channel */

	size_t align;			/* required transfer alignment, in bytes */
	size_t max_size;		/* maximum size of a single transfer */
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

/*
 * Called from the DMAengine completion callback: remove the block from the
 * list of active blocks and hand it back to the core DMA buffer code.
 */
static void iio_dmaengine_buffer_block_done(void *data)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/*
	 * Clamp the transfer to the maximum segment size supported by the
	 * DMA controller and round it down to the required alignment.
	 */
	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = rounddown(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

/*
 * Terminate all in-flight transfers and mark all queued blocks as aborted.
 */
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read_first_n = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

/* Operations used by the generic IIO DMA buffer core to drive the DMA */
static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine
 * framework to perform its transfers. The parent device will be used to
 * request the DMA channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 *
 * Return: Pointer to the new IIO buffer on success, an ERR_PTR() on failure.
 */
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_slave_channel_reason(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/*
	 * Transfers need to be aligned to the maximum of the minimum
	 * supported source and destination address widths. The capability
	 * fields are bitmasks where bit n means an n-byte width is
	 * supported, so __ffs() yields the smallest supported width.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	/* The channel was requested successfully, so release it on error */
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);
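
/*
 * Example usage (a minimal sketch; the foo_* names, the state struct and the
 * "rx" channel name are illustrative assumptions):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct iio_buffer *buffer;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev,
 *						  sizeof(struct foo_state));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		buffer = iio_dmaengine_buffer_alloc(&pdev->dev, "rx");
 *		if (IS_ERR(buffer))
 *			return PTR_ERR(buffer);
 *
 *		indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *		iio_device_attach_buffer(indio_dev, buffer);
 *
 *		platform_set_drvdata(pdev, indio_dev);
 *		return iio_device_register(indio_dev);
 *	}
 */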

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
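
/*
 * Example usage (continuing the illustrative foo_* driver sketched above):
 * the buffer allocated in probe is released in the matching remove path,
 * after the IIO device has been unregistered:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev = platform_get_drvdata(pdev);
 *
 *		iio_device_unregister(indio_dev);
 *		iio_dmaengine_buffer_free(indio_dev->buffer);
 *
 *		return 0;
 *	}
 */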