// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-mcf-edma.h>

#include "fsl-edma-common.h"

#define EDMA_CHANNELS		64
#define EDMA_MASK_CH(x)		((x) & GENMASK(5, 0))

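/*
 * Transfer-complete interrupt handler. The 64 channel interrupt flags
 * live in two 32-bit registers (INTH/INTL); each pending channel is
 * acknowledged through CINT, then its virtual descriptor is either
 * completed (scatter-gather) or its cyclic callback is invoked.
 */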
static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int ch;
	struct fsl_edma_chan *mcf_chan;
	u64 intmap;

	intmap = ioread32(regs->inth);
	intmap <<= 32;
	intmap |= ioread32(regs->intl);
	if (!intmap)
		return IRQ_NONE;

	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
		if (intmap & BIT(ch)) {
			iowrite8(EDMA_MASK_CH(ch), regs->cint);

			mcf_chan = &mcf_edma->chans[ch];

			spin_lock(&mcf_chan->vchan.lock);

			if (!mcf_chan->edesc) {
				/* terminate_all called before */
				spin_unlock(&mcf_chan->vchan.lock);
				continue;
			}

			if (!mcf_chan->edesc->iscyclic) {
				list_del(&mcf_chan->edesc->vdesc.node);
				vchan_cookie_complete(&mcf_chan->edesc->vdesc);
				mcf_chan->edesc = NULL;
				mcf_chan->status = DMA_COMPLETE;
				mcf_chan->idle = true;
			} else {
				vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
			}

			if (!mcf_chan->edesc)
				fsl_edma_xfer_desc(mcf_chan);

			spin_unlock(&mcf_chan->vchan.lock);
		}
	}

	return IRQ_HANDLED;
}

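/*
 * Error interrupt handler. Each flagged channel is stopped (its DMA
 * request disabled), the error is cleared through CERR, and the
 * channel is marked DMA_ERROR and idle.
 */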
static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int err, ch;

	err = ioread32(regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
		if (err & BIT(ch)) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	err = ioread32(regs->errh);
	if (!err)
		return IRQ_NONE;

	for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
		if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	return IRQ_HANDLED;
}

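/*
 * Request the controller's interrupt lines: two ranged IRQ resources
 * covering channels 0-15 and 16-55, plus single "edma-tx-56-63" and
 * "edma-err" lines when the platform provides them.
 */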
static int mcf_edma_irq_init(struct platform_device *pdev,
			     struct fsl_edma_engine *mcf_edma)
{
	int ret = 0, i;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
					   IORESOURCE_IRQ, "edma-tx-00-15");
	if (!res)
		return -EINVAL;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev,
					   IORESOURCE_IRQ, "edma-tx-16-55");
	if (!res)
		return -EINVAL;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_tx_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	ret = platform_get_irq_byname(pdev, "edma-err");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_err_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	return 0;
}

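/*
 * Release every interrupt line acquired in mcf_edma_irq_init(),
 * walking the same named resources.
 */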
static void mcf_edma_irq_free(struct platform_device *pdev,
			      struct fsl_edma_engine *mcf_edma)
{
	int irq;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
					   IORESOURCE_IRQ, "edma-tx-00-15");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	res = platform_get_resource_byname(pdev,
					   IORESOURCE_IRQ, "edma-tx-16-55");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);

	irq = platform_get_irq_byname(pdev, "edma-err");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);
}

static struct fsl_edma_drvdata mcf_data = {
	.version = v2,
	.setup_irq = mcf_edma_irq_init,
};

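/*
 * Probe: allocate the engine plus one fsl_edma_chan per channel, map
 * the controller registers, mask and clear all channel interrupts,
 * install the IRQ handlers and register the dmaengine device.
 */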
static int mcf_edma_probe(struct platform_device *pdev)
{
	struct mcf_edma_platform_data *pdata;
	struct fsl_edma_engine *mcf_edma;
	struct fsl_edma_chan *mcf_chan;
	struct edma_regs *regs;
	struct resource *res;
	int ret, i, len, chans;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data supplied\n");
		return -EINVAL;
	}

	chans = pdata->dma_channels;
	len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
	mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!mcf_edma)
		return -ENOMEM;

	mcf_edma->n_chans = chans;

	/* Set up drvdata for ColdFire edma */
	mcf_edma->drvdata = &mcf_data;
	mcf_edma->big_endian = 1;

	if (!mcf_edma->n_chans) {
		dev_info(&pdev->dev, "setting default channel number to 64\n");
		mcf_edma->n_chans = 64;
	}

	mutex_init(&mcf_edma->fsl_edma_mutex);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mcf_edma->membase))
		return PTR_ERR(mcf_edma->membase);

	fsl_edma_setup_regs(mcf_edma);
	regs = &mcf_edma->regs;

	INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
	for (i = 0; i < mcf_edma->n_chans; i++) {
		struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];

		mcf_chan->edma = mcf_edma;
		mcf_chan->slave_id = i;
		mcf_chan->idle = true;
		mcf_chan->dma_dir = DMA_NONE;
		mcf_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
		iowrite32(0x0, &regs->tcd[i].csr);
	}

	iowrite32(~0, regs->inth);
	iowrite32(~0, regs->intl);

	ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);

	mcf_edma->dma_dev.dev = &pdev->dev;
	mcf_edma->dma_dev.device_alloc_chan_resources =
			fsl_edma_alloc_chan_resources;
	mcf_edma->dma_dev.device_free_chan_resources =
			fsl_edma_free_chan_resources;
	mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
	mcf_edma->dma_dev.device_prep_dma_cyclic =
			fsl_edma_prep_dma_cyclic;
	mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	mcf_edma->dma_dev.device_pause = fsl_edma_pause;
	mcf_edma->dma_dev.device_resume = fsl_edma_resume;
	mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.directions =
			BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
	mcf_edma->dma_dev.filter.map = pdata->slave_map;
	mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;

	platform_set_drvdata(pdev, mcf_edma);

	ret = dma_async_device_register(&mcf_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		return ret;
	}

	/* Enable round robin arbitration */
	iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

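/*
 * Remove: free the controller IRQs, drain all virtual channels and
 * unregister the dmaengine device.
 */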
static int mcf_edma_remove(struct platform_device *pdev)
{
	struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);

	mcf_edma_irq_free(pdev, mcf_edma);
	fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
	dma_async_device_unregister(&mcf_edma->dma_dev);

	return 0;
}

static struct platform_driver mcf_edma_driver = {
	.driver		= {
		.name	= "mcf-edma",
	},
	.probe		= mcf_edma_probe,
	.remove		= mcf_edma_remove,
};

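/*
 * dma_request_channel() filter callback: matches only channels owned
 * by this driver whose slave_id equals the requested request line.
 */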
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &mcf_edma_driver.driver) {
		struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);

		return (mcf_chan->slave_id == (uintptr_t)param);
	}

	return false;
}
EXPORT_SYMBOL(mcf_edma_filter_fn);

static int __init mcf_edma_init(void)
{
	return platform_driver_register(&mcf_edma_driver);
}
subsys_initcall(mcf_edma_init);

static void __exit mcf_edma_exit(void)
{
	platform_driver_unregister(&mcf_edma_driver);
}
module_exit(mcf_edma_exit);

MODULE_ALIAS("platform:mcf-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
MODULE_LICENSE("GPL v2");