drivers/dma/virt-dma.c — Linux kernel v3.15
  1/*
  2 * Virtual DMA channel support for DMAengine
  3 *
  4 * Copyright (C) 2012 Russell King
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 as
  8 * published by the Free Software Foundation.
  9 */
 10#include <linux/device.h>
 11#include <linux/dmaengine.h>
 12#include <linux/module.h>
 13#include <linux/spinlock.h>
 14
 15#include "virt-dma.h"
 16
/* Map a dmaengine tx descriptor back to the virt_dma_desc embedding it. */
static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}
 21
 22dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 23{
 24	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
 25	struct virt_dma_desc *vd = to_virt_desc(tx);
 26	unsigned long flags;
 27	dma_cookie_t cookie;
 28
 29	spin_lock_irqsave(&vc->lock, flags);
 30	cookie = dma_cookie_assign(tx);
 31
 32	list_add_tail(&vd->node, &vc->desc_submitted);
 33	spin_unlock_irqrestore(&vc->lock, flags);
 34
 35	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
 36		vc, vd, cookie);
 37
 38	return cookie;
 39}
 40EXPORT_SYMBOL_GPL(vchan_tx_submit);
 41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 42struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
 43	dma_cookie_t cookie)
 44{
 45	struct virt_dma_desc *vd;
 46
 47	list_for_each_entry(vd, &vc->desc_issued, node)
 48		if (vd->tx.cookie == cookie)
 49			return vd;
 50
 51	return NULL;
 52}
 53EXPORT_SYMBOL_GPL(vchan_find_desc);
 54
 55/*
 56 * This tasklet handles the completion of a DMA descriptor by
 57 * calling its callback and freeing it.
 58 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd;
	dma_async_tx_callback cb = NULL;
	void *cb_data = NULL;
	LIST_HEAD(head);

	/*
	 * Snapshot all completed descriptors (and the current cyclic
	 * descriptor's callback) under the lock, then run callbacks
	 * without it held.
	 */
	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		/* Cyclic callback fires once per tasklet run; the
		 * descriptor itself is not freed here. */
		vc->cyclic = NULL;
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;
	}
	spin_unlock_irq(&vc->lock);

	if (cb)
		cb(cb_data);

	while (!list_empty(&head)) {
		vd = list_first_entry(&head, struct virt_dma_desc, node);
		/* Capture the callback and its argument BEFORE freeing,
		 * since desc_free() releases the descriptor memory. */
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;

		list_del(&vd->node);

		vc->desc_free(vd);

		if (cb)
			cb(cb_data);
	}
}
 93
 94void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 95{
 96	while (!list_empty(head)) {
 97		struct virt_dma_desc *vd = list_first_entry(head,
 98			struct virt_dma_desc, node);
 99		list_del(&vd->node);
100		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
101		vc->desc_free(vd);
102	}
103}
104EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
105
/*
 * vchan_init - initialise a virtual DMA channel
 * @vc: the virtual channel to initialise
 * @dmadev: the DMA device the channel belongs to
 *
 * Sets up cookie tracking, the descriptor lists, the completion
 * tasklet, and links the channel into @dmadev's channel list.
 */
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	/* Completions are delivered from tasklet (softirq) context. */
	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
121
/* Module metadata. */
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");
drivers/dma/virt-dma.c — Linux kernel v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Virtual DMA channel support for DMAengine
  4 *
  5 * Copyright (C) 2012 Russell King
 
 
 
 
  6 */
  7#include <linux/device.h>
  8#include <linux/dmaengine.h>
  9#include <linux/module.h>
 10#include <linux/spinlock.h>
 11
 12#include "virt-dma.h"
 13
/* Map a dmaengine tx descriptor back to the virt_dma_desc embedding it. */
static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}
 18
 19dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 20{
 21	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
 22	struct virt_dma_desc *vd = to_virt_desc(tx);
 23	unsigned long flags;
 24	dma_cookie_t cookie;
 25
 26	spin_lock_irqsave(&vc->lock, flags);
 27	cookie = dma_cookie_assign(tx);
 28
 29	list_move_tail(&vd->node, &vc->desc_submitted);
 30	spin_unlock_irqrestore(&vc->lock, flags);
 31
 32	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
 33		vc, vd, cookie);
 34
 35	return cookie;
 36}
 37EXPORT_SYMBOL_GPL(vchan_tx_submit);
 38
 39/**
 40 * vchan_tx_desc_free - free a reusable descriptor
 41 * @tx: the transfer
 42 *
 43 * This function frees a previously allocated reusable descriptor. The only
 44 * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the
 45 * transfer.
 46 *
 47 * Returns 0 upon success
 48 */
 49int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
 50{
 51	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
 52	struct virt_dma_desc *vd = to_virt_desc(tx);
 53	unsigned long flags;
 54
 55	spin_lock_irqsave(&vc->lock, flags);
 56	list_del(&vd->node);
 57	spin_unlock_irqrestore(&vc->lock, flags);
 58
 59	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
 60		vc, vd, vd->tx.cookie);
 61	vc->desc_free(vd);
 62	return 0;
 63}
 64EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
 65
 66struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
 67	dma_cookie_t cookie)
 68{
 69	struct virt_dma_desc *vd;
 70
 71	list_for_each_entry(vd, &vc->desc_issued, node)
 72		if (vd->tx.cookie == cookie)
 73			return vd;
 74
 75	return NULL;
 76}
 77EXPORT_SYMBOL_GPL(vchan_find_desc);
 78
 79/*
 80 * This tasklet handles the completion of a DMA descriptor by
 81 * calling its callback and freeing it.
 82 */
 83static void vchan_complete(struct tasklet_struct *t)
 84{
 85	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
 86	struct virt_dma_desc *vd, *_vd;
 87	struct dmaengine_desc_callback cb;
 
 88	LIST_HEAD(head);
 89
 90	spin_lock_irq(&vc->lock);
 91	list_splice_tail_init(&vc->desc_completed, &head);
 92	vd = vc->cyclic;
 93	if (vd) {
 94		vc->cyclic = NULL;
 95		dmaengine_desc_get_callback(&vd->tx, &cb);
 96	} else {
 97		memset(&cb, 0, sizeof(cb));
 98	}
 99	spin_unlock_irq(&vc->lock);
100
101	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
 
102
103	list_for_each_entry_safe(vd, _vd, &head, node) {
104		dmaengine_desc_get_callback(&vd->tx, &cb);
 
 
105
106		list_del(&vd->node);
107		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
108		vchan_vdesc_fini(vd);
 
 
 
109	}
110}
111
112void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
113{
114	struct virt_dma_desc *vd, *_vd;
115
116	list_for_each_entry_safe(vd, _vd, head, node) {
117		list_del(&vd->node);
118		vchan_vdesc_fini(vd);
 
119	}
120}
121EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
122
/*
 * vchan_init - initialise a virtual DMA channel
 * @vc: the virtual channel to initialise
 * @dmadev: the DMA device the channel belongs to
 *
 * Sets up cookie tracking, the descriptor lifecycle lists, the
 * completion tasklet, and links the channel into @dmadev's channel list.
 */
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);
	INIT_LIST_HEAD(&vc->desc_terminated);

	/* Completions are delivered from tasklet (softirq) context. */
	tasklet_setup(&vc->task, vchan_complete);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
140
/* Module metadata. */
MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("Virtual DMA channel support for DMAengine");
MODULE_LICENSE("GPL");