// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
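
/*
 * Illustrative sketch (not part of this file): vchan_tx_submit() is normally
 * reached through the tx_submit hook that vchan_tx_prep() in virt-dma.h
 * installs, so a driver's prep path only has to embed a virt_dma_desc in its
 * own descriptor. "foo_desc" and "foo_prep_dma_memcpy" are hypothetical names.
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;	// must be embedded, see to_virt_desc()
 *		dma_addr_t src, dst;
 *		size_t len;
 *	};
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		d->src = src;
 *		d->dst = dst;
 *		d->len = len;
 *		// Links d into vc->desc_allocated and wires d->vd.tx.tx_submit
 *		// to vchan_tx_submit().
 *		return vchan_tx_prep(vc, &d->vd, flags);
 *	}
 */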

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way to release it is to clear the DMA_CTRL_REUSE flag and submit
 * the transfer one last time.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
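
/*
 * Illustrative client-side sketch: a descriptor becomes reusable via
 * dmaengine_desc_set_reuse() and is eventually released with
 * dmaengine_desc_free(), which reaches vchan_tx_desc_free() through the
 * desc_free hook. Error handling is abbreviated; set_reuse fails when the
 * channel does not advertise descriptor reuse.
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	if (dmaengine_desc_set_reuse(tx))
 *		return -EPERM;		// channel lacks descriptor reuse
 *	// ... submit and re-submit tx as often as needed ...
 *	dmaengine_desc_free(tx);	// ends up in vchan_tx_desc_free()
 */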

/*
 * Find the issued descriptor matching @cookie, e.g. for residue reporting.
 * The caller must hold vc->lock, since this walks the desc_issued list.
 */
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
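
/*
 * Illustrative sketch: vchan_find_desc() is typically called from a driver's
 * ->device_tx_status() hook to report residue for a transfer that is still
 * in flight. foo_desc_residue() is a hypothetical helper.
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *			dma_cookie_t cookie, struct dma_tx_state *txstate)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct virt_dma_desc *vd;
 *		enum dma_status ret;
 *		unsigned long flags;
 *
 *		ret = dma_cookie_status(chan, cookie, txstate);
 *		if (ret == DMA_COMPLETE)
 *			return ret;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		vd = vchan_find_desc(vc, cookie);
 *		if (vd)
 *			dma_set_residue(txstate, foo_desc_residue(vd));
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		return ret;
 *	}
 */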

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(struct tasklet_struct *t)
{
	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	/*
	 * Invoke the cyclic callback outside the lock; with a zeroed cb
	 * (no cyclic descriptor pending) the invoke is a no-op.
	 */
	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
		/* Recycle the descriptor if it is reusable, else free it. */
		vchan_vdesc_fini(vd);
	}
}
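
/*
 * Illustrative sketch: descriptors reach desc_completed (and hence this
 * tasklet) via vchan_cookie_complete(), called from the driver's interrupt
 * handler with vc->lock held. "foo_chan" and its "cur" field are
 * hypothetical driver-private state.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_chan *fc = data;
 *		struct virt_dma_chan *vc = &fc->vc;
 *
 *		spin_lock(&vc->lock);
 *		if (fc->cur) {
 *			// Completes the cookie, moves the descriptor to
 *			// desc_completed and schedules vchan_complete().
 *			vchan_cookie_complete(&fc->cur->vd);
 *			fc->cur = NULL;
 *		}
 *		spin_unlock(&vc->lock);
 *
 *		return IRQ_HANDLED;
 *	}
 */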

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		list_del(&vd->node);
		vchan_vdesc_fini(vd);
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
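
/*
 * Illustrative sketch: vchan_dma_desc_free_list() pairs with
 * vchan_get_all_descriptors() in a driver's ->device_terminate_all() hook;
 * the lists are drained under the lock and freed outside it.
 * foo_stop_hardware() is a hypothetical helper.
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		foo_stop_hardware(chan);
 *		vchan_get_all_descriptors(vc, &head);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		vchan_dma_desc_free_list(vc, &head);
 *
 *		return 0;
 *	}
 */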

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);
	INIT_LIST_HEAD(&vc->desc_terminated);

	tasklet_setup(&vc->task, vchan_complete);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
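
/*
 * Illustrative sketch: a driver calls vchan_init() once per channel at probe
 * time, after pointing desc_free at its own destructor. "foo_chan",
 * "foo_desc_free" and "priv" are hypothetical names.
 *
 *	INIT_LIST_HEAD(&dmadev->channels);
 *	for (i = 0; i < nr_channels; i++) {
 *		struct foo_chan *fc = &priv->chans[i];
 *
 *		fc->vc.desc_free = foo_desc_free;	// frees one foo_desc
 *		vchan_init(&fc->vc, dmadev);		// links fc->vc.chan into dmadev
 *	}
 */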

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");