// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and to implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the actual DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by drivers for peripherals connected to a
 * DMA controller that has a DMAengine driver.
 */

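/*
 * A minimal usage sketch (illustrative only, not part of this module): a
 * hypothetical converter driver requests the "rx" DMA channel named in its
 * firmware description and attaches a DMAengine-backed capture buffer from
 * its probe path. The function and variable names are examples.
 */
#if 0	/* example only, not compiled */
static int example_adc_probe(struct device *dev)
{
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, 0);
	if (!indio_dev)
		return -ENOMEM;

	/* ... set up channels, info callbacks, etc. ... */

	/* Request the "rx" DMA channel and attach an input (capture) buffer. */
	ret = devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
						  IIO_BUFFER_DIRECTION_IN);
	if (ret)
		return ret;

	return devm_iio_device_register(dev, indio_dev);
}
#endif
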
struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

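/*
 * Completion callback invoked by the DMA engine driver when a transfer
 * finishes. The block is removed from the list of in-flight transfers and any
 * residue reported by the controller (bytes that were not transferred) is
 * subtracted from bytes_used before the block is handed back to the core IIO
 * DMA buffer code.
 */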
static void iio_dmaengine_buffer_block_done(void *data,
		const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}

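/*
 * Prepare and submit a single block to the DMA engine. Blocks that carry an
 * sg_table (blocks backed by an attached DMABUF) are submitted as an array of
 * dma_vecs, while blocks allocated by the core DMA buffer code are contiguous
 * and are submitted as a single slave transfer.
 */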
static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir;
	struct scatterlist *sgl;
	struct dma_vec *vecs;
	size_t max_size;
	dma_cookie_t cookie;
	size_t len_total;
	unsigned int i;
	int nents;

	max_size = min(block->size, dmaengine_buffer->max_size);
	max_size = round_down(max_size, dmaengine_buffer->align);

	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
		dma_dir = DMA_DEV_TO_MEM;
	else
		dma_dir = DMA_MEM_TO_DEV;

	if (block->sg_table) {
		sgl = block->sg_table->sgl;
		nents = sg_nents_for_len(sgl, block->bytes_used);
		if (nents < 0)
			return nents;

		vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
		if (!vecs)
			return -ENOMEM;

		len_total = block->bytes_used;

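		/*
		 * Turn the DMA-mapped scatterlist into an array of dma_vecs,
		 * clamping the entries so that no more than bytes_used bytes
		 * are transferred in total.
		 */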
		for (i = 0; i < nents; i++) {
			vecs[i].addr = sg_dma_address(sgl);
			vecs[i].len = min(sg_dma_len(sgl), len_total);
			len_total -= vecs[i].len;

			sgl = sg_next(sgl);
		}

		desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
							 vecs, nents, dma_dir,
							 DMA_PREP_INTERRUPT);
		kfree(vecs);
	} else {
		max_size = min(block->size, dmaengine_buffer->max_size);
		max_size = round_down(max_size, dmaengine_buffer->align);

		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
			block->bytes_used = max_size;

		if (!block->bytes_used || block->bytes_used > max_size)
			return -EINVAL;

		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
						   block->phys_addr,
						   block->bytes_used,
						   dma_dir,
						   DMA_PREP_INTERRUPT);
	}
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

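/*
 * Cancel all in-flight transfers. dmaengine_terminate_sync() stops the channel
 * and waits for the completion callbacks to finish, after which any blocks
 * still on the active list are returned to the core so the queue can be torn
 * down or restarted.
 */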
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

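/*
 * Most buffer operations are handled directly by the generic IIO DMA buffer
 * core (the iio_dma_buffer_* helpers); only buffer release is DMAengine
 * specific here, with block submission and abort hooked up through
 * iio_dmaengine_default_ops below.
 */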
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.write = iio_dma_buffer_write,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_usage,
	.space_available = iio_dma_buffer_usage,
	.release = iio_dmaengine_buffer_release,

	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,

	.lock_queue = iio_dma_buffer_lock_queue,
	.unlock_queue = iio_dma_buffer_unlock_queue,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

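/*
 * length_align_bytes is exposed as a per-buffer sysfs attribute so that
 * userspace can size its buffers as a multiple of the DMA transfer alignment
 * (typically read from .../bufferN/length_align_bytes; the exact path depends
 * on the device and buffer index).
 */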
static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes,
	NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/*
	 * Transfers need to be aligned to the maximum of the minimum
	 * supported widths: the addr_widths fields are bit masks of the
	 * transfer widths (in bytes) the controller supports, so __ffs()
	 * yields the smallest supported width on each side.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");

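/**
 * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc() and
 * attaches it to @indio_dev with iio_device_attach_buffer(). It also appends
 * the INDIO_BUFFER_HARDWARE mode to the supported modes of the IIO device.
 *
 * Once done using the buffer it should be released with
 * iio_dmaengine_buffer_free().
 */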
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
						  struct iio_dev *indio_dev,
						  const char *channel,
						  enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	buffer->direction = dir;

	ret = iio_device_attach_buffer(indio_dev, buffer);
	if (ret) {
		iio_dmaengine_buffer_free(buffer);
		return ERR_PTR(ret);
	}

	return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");

static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_setup_ext()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device. The buffer is automatically released again when @dev is unbound.
 */
int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
					struct iio_dev *indio_dev,
					const char *channel,
					enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;

	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
					buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");

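/*
 * Note: the helpers above are exported in the IIO_DMAENGINE_BUFFER symbol
 * namespace, so a driver that calls them also needs
 * MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER").
 */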
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("IIO_DMA_BUFFER");