drivers/dma/virt-dma.c
v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
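
For context: drivers never call vchan_tx_submit() directly. The inline helper vchan_tx_prep() in virt-dma.h installs it as the descriptor's tx_submit hook and queues the descriptor on desc_allocated. A minimal sketch of a driver prep callback, assuming a hypothetical foo_desc that embeds the virt_dma_desc:

	struct foo_desc {
		struct virt_dma_desc vd;	/* container_of() recovers foo_desc */
		size_t len;			/* hypothetical hardware parameters */
	};

	static struct dma_async_tx_descriptor *
	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
			    size_t len, unsigned long flags)
	{
		struct virt_dma_chan *vc = to_virt_chan(chan);
		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

		if (!d)
			return NULL;
		d->len = len;
		/* ... stash dst/src in the driver-private part of d ... */

		/* queues d->vd on desc_allocated; tx_submit lands above */
		return vchan_tx_prep(vc, &d->vd, flags);
	}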

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way to release it is to clear the DMA_CTRL_REUSE flag and submit
 * the transfer one last time.
 *
 * Returns 0 upon success.
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
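
On the client side a reusable descriptor reaches this function through dmaengine_desc_free(), which dispatches to the tx->desc_free hook set by vchan_tx_prep(). A hedged usage sketch, assuming chan is a channel whose driver supports reuse and buf/len describe an already-mapped buffer:

	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for completion; tx may now be resubmitted ... */

	dmaengine_desc_free(tx);	/* ends up in vchan_tx_desc_free() */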

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
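
vchan_find_desc() only walks desc_issued, so a hit usually means the transfer is still waiting to run and its full length is outstanding. Drivers typically use it from their device_tx_status callback; a sketch, where foo_desc_size() is a hypothetical helper returning a descriptor's total byte count:

	static enum dma_status foo_tx_status(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		struct virt_dma_chan *vc = to_virt_chan(chan);
		struct virt_dma_desc *vd;
		enum dma_status ret;
		unsigned long flags;

		ret = dma_cookie_status(chan, cookie, txstate);
		if (ret == DMA_COMPLETE || !txstate)
			return ret;

		spin_lock_irqsave(&vc->lock, flags);
		vd = vchan_find_desc(vc, cookie);
		if (vd)
			/* still on desc_issued: nothing transferred yet */
			dma_set_residue(txstate, foo_desc_size(vd));
		spin_unlock_irqrestore(&vc->lock, flags);

		return ret;
	}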

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(struct tasklet_struct *t)
{
	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
		vchan_vdesc_fini(vd);
	}
}
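
This tasklet is scheduled by vchan_cookie_complete() and vchan_cyclic_callback() from virt-dma.h, normally out of the driver's interrupt handler with the channel lock held. A minimal sketch, assuming a hypothetical foo_chan that remembers the descriptor currently on the hardware (reusing foo_desc from the earlier sketch):

	struct foo_chan {
		struct virt_dma_chan vc;
		struct foo_desc *cur;	/* descriptor being executed, if any */
		bool cyclic;		/* cur is a cyclic transfer */
	};

	static irqreturn_t foo_dma_irq(int irq, void *data)
	{
		struct foo_chan *fc = data;

		spin_lock(&fc->vc.lock);
		if (!fc->cur) {
			spin_unlock(&fc->vc.lock);
			return IRQ_NONE;
		}
		if (fc->cyclic) {
			/* period elapsed: keep the descriptor, run its callback */
			vchan_cyclic_callback(&fc->cur->vd);
		} else {
			/* moves the vdesc to desc_completed, schedules this tasklet */
			vchan_cookie_complete(&fc->cur->vd);
			fc->cur = NULL;
			/* ... start the next descriptor from vchan_next_desc() ... */
		}
		spin_unlock(&fc->vc.lock);

		return IRQ_HANDLED;
	}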

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		list_del(&vd->node);
		vchan_vdesc_fini(vd);
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
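
This is the usual tail of a driver's device_terminate_all callback: stop the hardware and collect every queued descriptor under the lock, then free the batch outside it. A sketch following that pattern, using the vchan_get_all_descriptors() helper from virt-dma.h:

	static int foo_terminate_all(struct dma_chan *chan)
	{
		struct virt_dma_chan *vc = to_virt_chan(chan);
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&vc->lock, flags);
		/* ... halt the hardware, drop any in-flight descriptor ... */
		vchan_get_all_descriptors(vc, &head);
		spin_unlock_irqrestore(&vc->lock, flags);

		vchan_dma_desc_free_list(vc, &head);
		return 0;
	}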

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);
	INIT_LIST_HEAD(&vc->desc_terminated);

	tasklet_setup(&vc->task, vchan_complete);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
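
A driver wires each channel up once at probe time; the one mandatory extra step is setting desc_free, which vchan_vdesc_fini() and vchan_tx_desc_free() call to release the containing descriptor. A hedged probe fragment, with foo_desc/foo_chan hypothetical as above and ddev a struct dma_device the driver has already filled in:

	static void foo_desc_free(struct virt_dma_desc *vd)
	{
		kfree(container_of(vd, struct foo_desc, vd));
	}

	/* probe-time fragment */
	fc->vc.desc_free = foo_desc_free;
	vchan_init(&fc->vc, ddev);	/* links fc->vc.chan into ddev->channels */

	return dma_async_device_register(ddev);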

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way to release it is to clear the DMA_CTRL_REUSE flag and submit
 * the transfer one last time.
 *
 * Returns 0 upon success.
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		vchan_vdesc_fini(vd);

		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
	}
}
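
The main difference from the v6.8 version above is the tasklet API: v5.4 predates the tasklet_setup()/from_tasklet() conversion (merged in the v5.9 era), so the callback takes its context as an unsigned long and vchan_init() below passes the pointer explicitly:

	/* v5.4: context passed as a cast unsigned long */
	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	/* v6.8: type-safe, the callback recovers vc via from_tasklet() */
	tasklet_setup(&vc->task, vchan_complete);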

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		if (dmaengine_desc_test_reuse(&vd->tx)) {
			list_move_tail(&vd->node, &vc->desc_allocated);
		} else {
			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
			list_del(&vd->node);
			vc->desc_free(vd);
		}
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
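
In this version the DMA_CTRL_REUSE handling is open-coded here; later kernels (see the v6.8 listing above) fold it into vchan_vdesc_fini() in virt-dma.h. A sketch of what that helper does, not the verbatim source:

	/* reusable descriptors go back to desc_allocated, others are freed */
	static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
	{
		struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

		if (dmaengine_desc_test_reuse(&vd->tx)) {
			unsigned long flags;

			spin_lock_irqsave(&vc->lock, flags);
			list_add_tail(&vd->node, &vc->desc_allocated);
			spin_unlock_irqrestore(&vc->lock, flags);
		} else {
			vc->desc_free(vd);
		}
	}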

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");