/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
 * (tested using dmatest module) and data transfers between memory and
 * peripheral I/O memory by means of slave scatter/gather with these
 * limitations:
 *  - chunked transfers (described by s/g lists with more than one item) are
 *    refused as long as proper support for scatter/gather is missing
 *  - transfers on MPC8308 always start from software as this SoC does not
 *    have external request lines for peripheral flow control
 *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
 *    MPC512x), and 32 bytes are supported, and, consequently, source
 *    addresses and destination addresses must be aligned accordingly;
 *    furthermore, for MPC512x SoCs, the transfer size must be aligned on
 *    (chunk size * maxburst)
 */
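
/*
 * Illustrative sketch (an editorial example, not part of the driver): how a
 * client driver might set up a peripheral-to-memory transfer within the
 * limits listed above. The FIFO address and burst size are made-up values;
 * the calls are the generic dmaengine slave API.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_paddr,	// hypothetical FIFO bus address
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 16,
 *	};
 *	if (dmaengine_slave_config(chan, &cfg))
 *		return -EINVAL;
 *
 * With these values, the limitation above means each MPC512x transfer length
 * must be aligned on chunk size * maxburst, here 4 * 16 = 64 bytes.
 */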

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
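
/*
 * Note: the TSIZE values encode log2 of the transfer chunk size in bytes
 * (e.g. 1 << MPC_DMA_TSIZE_16 == 16), which is why buswidth_to_dmatsize()
 * below can compute them as an integer log2.
 */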

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

struct mpc_dma_desc {
	struct dma_async_tx_descriptor desc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	int error;
	struct list_head node;
	int will_access_peripheral;
};

struct mpc_dma_chan {
	struct dma_chan chan;
	struct list_head free;
	struct list_head prepared;
	struct list_head queued;
	struct list_head active;
	struct list_head completed;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t src_per_paddr;
	u32 src_tcd_nunits;
	u8 swidth;
	dma_addr_t dst_per_paddr;
	u32 dst_tcd_nunits;
	u8 dwidth;

	/* Lock for this structure */
	spinlock_t lock;
};

struct mpc_dma {
	struct dma_device dma;
	struct tasklet_struct tasklet;
	struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem *regs;
	struct mpc_dma_tcd __iomem *tcd;
	int irq;
	int irq2;
	uint error_status;
	int is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t error_status_lock;
};

#define DRV_NAME "mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met when calling mpc_dma_execute():
 *  a) mchan->lock is acquired,
 *  b) mchan->active list is empty,
 *  c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
					 struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}
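
/*
 * Note on the chaining above: every TCD's dlast_sga holds the physical
 * address of the next descriptor's TCD and e_sg is set, so the controller
 * reloads the next TCD by itself on scatter/gather completion; only the
 * first TCD of the chain is written into the channel's TCD registers.
 */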

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
				    in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
			    in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			dmaengine_desc_get_callback_invoke(desc, NULL);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
			MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n",
				i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
					 node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}

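/*
 * Convert a slave bus width in bytes to the hardware transfer-size encoding
 * (log2 of the width), e.g. 4 -> MPC_DMA_TSIZE_4.
 */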
static inline u8 buswidth_to_dmatsize(u8 buswidth)
{
	u8 res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;
	return res;
}

static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len, enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		mdesc = list_first_entry_or_null(&mchan->free,
						 struct mpc_dma_desc, node);
		if (!mdesc) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
				goto err_prep;

			tcd->soff = 0;
			tcd->doff = mchan->dwidth;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
				goto err_prep;

			tcd->soff = mchan->swidth;
			tcd->doff = 0;
		}

		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);

		if (mdma->is_mpc8308) {
			tcd->nbytes = sg_dma_len(sg);
			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
				goto err_prep;

			/* No major loops for MPC8308 */
			tcd->biter = 1;
			tcd->citer = 1;
		} else {
			len = sg_dma_len(sg);
			tcd->nbytes = tcd_nunits * tcd->ssize;
			if (!IS_ALIGNED(len, tcd->nbytes))
				goto err_prep;

			iter = len / tcd->nbytes;
			if (iter >= 1 << 15) {
				/* len is too big */
				goto err_prep;
			}
			/* citer_linkch contains the high bits of iter */
			tcd->biter = iter & 0x1ff;
			tcd->biter_linkch = iter >> 9;
			tcd->citer = tcd->biter;
			tcd->citer_linkch = tcd->biter_linkch;
		}

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}

static inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		if (is_mpc8308)
			return false;
		/* fall through: 16 bytes is valid on MPC512x */
	case 1:
	case 2:
	case 4:
	case 32:
		break;
	default:
		return false;
	}

	return true;
}

static int mpc_dma_device_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	unsigned long flags;

	/*
	 * Software constraints:
	 *  - only transfers between a peripheral device and memory are
	 *    supported
	 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
	 *    are supported, and, consequently, source addresses and
	 *    destination addresses must be aligned accordingly; furthermore,
	 *    for MPC512x SoCs, the transfer size must be aligned on (chunk
	 *    size * maxburst)
	 *  - during the transfer, the RAM address is incremented by the size
	 *    of transfer chunk
	 *  - the peripheral port's address is constant during the transfer.
	 */

	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
		return -EINVAL;
	}

	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
		return -EINVAL;

	spin_lock_irqsave(&mchan->lock, flags);

	mchan->src_per_paddr = cfg->src_addr;
	mchan->src_tcd_nunits = cfg->src_maxburst;
	mchan->swidth = cfg->src_addr_width;
	mchan->dst_per_paddr = cfg->dst_addr;
	mchan->dst_tcd_nunits = cfg->dst_maxburst;
	mchan->dwidth = cfg->dst_addr_width;

	/* Apply defaults */
	if (mchan->src_tcd_nunits == 0)
		mchan->src_tcd_nunits = 1;
	if (mchan->dst_tcd_nunits == 0)
		mchan->dst_tcd_nunits = 1;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	unsigned long flags;

	/* Disable channel requests */
	spin_lock_irqsave(&mchan->lock, flags);

	out_8(&mdma->regs->dmacerq, chan->chan_id);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (!mdma->irq) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (!mdma->irq2) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
				     DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_config = mpc_dma_device_config;
	dma->device_terminate_all = mpc_dma_device_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
					     MPC_DMA_DMACR_ERGA |
					     MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						    of_dma_xlate_by_chan_id,
						    mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);
	tasklet_kill(&mdma->tasklet);

	return 0;
}

static const struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc_dma_match);
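
/*
 * Example device tree node (editorial illustration): the compatible strings
 * come from mpc_dma_match above, while the unit address, reg window and
 * interrupt specifier are placeholder values, not taken from a real board
 * file.
 *
 *	dma@14000 {
 *		compatible = "fsl,mpc5121-dma";
 *		reg = <0x14000 0x1800>;
 *		interrupts = <65 0x8>;
 *	};
 */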

static struct platform_driver mpc_dma_driver = {
	.probe = mpc_dma_probe,
	.remove = mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");