/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   covers the MPC8560, MPC8555, MPC8548, MPC8641, and similar parts.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...) \
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...) \
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */
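
/*
 * All register access is done through the DMA_IN/DMA_OUT accessors, which
 * honor the controller's endianness: the channel feature flags select
 * big-endian I/O for 85xx parts and little-endian I/O for 83xx parts.
 */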

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

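/*
 * The channel is idle when the channel busy (CB) bit is clear, or once the
 * controller has halted the channel (CH bit set), e.g. after an abort.
 */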
static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the loop is
 * active, the DMA reads from a small window of source addresses instead
 * of advancing continuously: with a loop size of 4, it reads from SA,
 * SA + 1, SA + 2, SA + 3, then wraps back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the loop
 * is active, the DMA writes to a small window of destination addresses
 * instead of advancing continuously: with a loop size of 4, it writes to
 * TA, TA + 1, TA + 2, TA + 3, then wraps back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	/* the request count field occupies MR bits 24-27 */
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel is started by the external
 * DMA start pin, so dma_start() does not begin the transfer immediately.
 * The DMA channel waits for the control pin to be asserted instead.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

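/*
 * Append a new transaction to the channel's pending queue, splicing its
 * hardware descriptors onto the tail of any chain that is already queued.
 */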
static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

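/**
 * fsl_dma_tx_submit - queue a transaction for execution
 * @tx: transaction descriptor, prepared by one of the prep functions
 *
 * Assign a cookie to every descriptor in the transaction and append the
 * chain to the tail of the pending queue. The hardware is not started
 * here; that happens when issue_pending is called.
 */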
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < DMA_MIN_COOKIE)
			cookie = DMA_MIN_COOKIE;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

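/*
 * Like fsldma_free_desc_list(), but walk the list backwards. Used by the
 * prep functions to unwind a partially built descriptor chain on failure.
 */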
static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

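/*
 * Prepare a DMA_INTERRUPT transaction: a single NULL (zero-length) link
 * descriptor whose completion interrupt can be used to signal that all
 * previously submitted descriptors have finished.
 */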
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

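/**
 * fsl_dma_prep_memcpy - prepare descriptors for a memory-to-memory copy
 * @dchan : DMA channel
 * @dma_dst : destination DMA address
 * @dma_src : source DMA address
 * @len : number of bytes to copy
 * @flags : DMAEngine flags
 *
 * Build a chain of link descriptors, each covering at most
 * FSL_DMA_BCR_MAX_CNT bytes, and mark the last one end-of-link.
 */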
static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

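/*
 * Prepare descriptors for a scatterlist-to-scatterlist copy. Both lists
 * are walked in lockstep, emitting the largest transfer the hardware
 * allows at each step, until either list runs out of entries.
 */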
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {
		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {
			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {
			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_TO_DEVICE)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:
		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}

/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	struct device *dev = chan->common.device->dev;
	dma_addr_t src = get_desc_src(chan, desc);
	dma_addr_t dst = get_desc_dst(chan, desc);
	u32 len = get_desc_cnt(chan, desc);

	/* Run the link descriptor callback function */
	if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p callback\n", desc);
#endif
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	/* Unmap the dst buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
	}

	/* Unmap the src buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
	}

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p free\n", desc);
#endif
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = DMA_IN(chan, &chan->regs->mr, 32);
		mode &= ~FSL_DMA_MR_CS;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_complete;
	dma_cookie_t last_used;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	last_complete = chan->completed_cookie;
	last_used = dchan->cookie;

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

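/*
 * Per-channel interrupt handler: acknowledge the status bits that were
 * raised and defer all descriptor cleanup to the tasklet.
 */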
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

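/*
 * Deferred interrupt work: record the last completed cookie, pull the
 * finished descriptors off ld_running, restart the controller if more
 * work is pending, and only then run the descriptor callbacks with the
 * channel lock dropped.
 */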
static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;

		chan->completed_cookie = cookie;
		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}

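/*
 * Per-controller interrupt handler: read the controller-level status
 * register and dispatch to the per-channel handler for every channel
 * with a pending status byte.
 */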
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

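/*
 * Probe a single DMA channel node: map its registers, derive the channel
 * index from the 'reg' property, wire up the per-IP feature callbacks,
 * and add the channel to the DMA device's channel list.
 */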
static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
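	/* channel register blocks are 0x80 bytes apart, starting at offset 0x100 */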
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also get the 83xx features */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	chan->idle = true;

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo / Elo Plus DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");
1/*
2 * Freescale MPC85xx, MPC83xx DMA Engine support
3 *
4 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
5 *
6 * Author:
7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
8 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007
9 *
10 * Description:
11 * DMA engine driver for Freescale MPC8540 DMA controller, which is
12 * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc.
13 * The support for MPC8349 DMA controller is also added.
14 *
15 * This driver instructs the DMA controller to issue the PCI Read Multiple
16 * command for PCI read operations, instead of using the default PCI Read Line
17 * command. Please be aware that this setting may result in read pre-fetching
18 * on some platforms.
19 *
20 * This is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 */
26
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/slab.h>
31#include <linux/interrupt.h>
32#include <linux/dmaengine.h>
33#include <linux/delay.h>
34#include <linux/dma-mapping.h>
35#include <linux/dmapool.h>
36#include <linux/of_address.h>
37#include <linux/of_irq.h>
38#include <linux/of_platform.h>
39#include <linux/fsldma.h>
40#include "dmaengine.h"
41#include "fsldma.h"
42
43#define chan_dbg(chan, fmt, arg...) \
44 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
45#define chan_err(chan, fmt, arg...) \
46 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
47
48static const char msg_ld_oom[] = "No free memory for link descriptor";
49
50/*
51 * Register Helpers
52 */
53
54static void set_sr(struct fsldma_chan *chan, u32 val)
55{
56 DMA_OUT(chan, &chan->regs->sr, val, 32);
57}
58
59static u32 get_sr(struct fsldma_chan *chan)
60{
61 return DMA_IN(chan, &chan->regs->sr, 32);
62}
63
64static void set_mr(struct fsldma_chan *chan, u32 val)
65{
66 DMA_OUT(chan, &chan->regs->mr, val, 32);
67}
68
69static u32 get_mr(struct fsldma_chan *chan)
70{
71 return DMA_IN(chan, &chan->regs->mr, 32);
72}
73
74static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
75{
76 DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
77}
78
79static dma_addr_t get_cdar(struct fsldma_chan *chan)
80{
81 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
82}
83
84static void set_bcr(struct fsldma_chan *chan, u32 val)
85{
86 DMA_OUT(chan, &chan->regs->bcr, val, 32);
87}
88
89static u32 get_bcr(struct fsldma_chan *chan)
90{
91 return DMA_IN(chan, &chan->regs->bcr, 32);
92}
93
94/*
95 * Descriptor Helpers
96 */
97
98static void set_desc_cnt(struct fsldma_chan *chan,
99 struct fsl_dma_ld_hw *hw, u32 count)
100{
101 hw->count = CPU_TO_DMA(chan, count, 32);
102}
103
104static void set_desc_src(struct fsldma_chan *chan,
105 struct fsl_dma_ld_hw *hw, dma_addr_t src)
106{
107 u64 snoop_bits;
108
109 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
110 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
111 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
112}
113
114static void set_desc_dst(struct fsldma_chan *chan,
115 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
116{
117 u64 snoop_bits;
118
119 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
120 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
121 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
122}
123
124static void set_desc_next(struct fsldma_chan *chan,
125 struct fsl_dma_ld_hw *hw, dma_addr_t next)
126{
127 u64 snoop_bits;
128
129 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
130 ? FSL_DMA_SNEN : 0;
131 hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
132}
133
134static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
135{
136 u64 snoop_bits;
137
138 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
139 ? FSL_DMA_SNEN : 0;
140
141 desc->hw.next_ln_addr = CPU_TO_DMA(chan,
142 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
143 | snoop_bits, 64);
144}
145
146/*
147 * DMA Engine Hardware Control Helpers
148 */
149
150static void dma_init(struct fsldma_chan *chan)
151{
152 /* Reset the channel */
153 set_mr(chan, 0);
154
155 switch (chan->feature & FSL_DMA_IP_MASK) {
156 case FSL_DMA_IP_85XX:
157 /* Set the channel to below modes:
158 * EIE - Error interrupt enable
159 * EOLNIE - End of links interrupt enable
160 * BWC - Bandwidth sharing among channels
161 */
162 set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
163 | FSL_DMA_MR_EOLNIE);
164 break;
165 case FSL_DMA_IP_83XX:
166 /* Set the channel to below modes:
167 * EOTIE - End-of-transfer interrupt enable
168 * PRC_RM - PCI read multiple
169 */
170 set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
171 break;
172 }
173}
174
175static int dma_is_idle(struct fsldma_chan *chan)
176{
177 u32 sr = get_sr(chan);
178 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
179}
180
181/*
182 * Start the DMA controller
183 *
184 * Preconditions:
185 * - the CDAR register must point to the start descriptor
186 * - the MRn[CS] bit must be cleared
187 */
188static void dma_start(struct fsldma_chan *chan)
189{
190 u32 mode;
191
192 mode = get_mr(chan);
193
194 if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
195 set_bcr(chan, 0);
196 mode |= FSL_DMA_MR_EMP_EN;
197 } else {
198 mode &= ~FSL_DMA_MR_EMP_EN;
199 }
200
201 if (chan->feature & FSL_DMA_CHAN_START_EXT) {
202 mode |= FSL_DMA_MR_EMS_EN;
203 } else {
204 mode &= ~FSL_DMA_MR_EMS_EN;
205 mode |= FSL_DMA_MR_CS;
206 }
207
208 set_mr(chan, mode);
209}
210
211static void dma_halt(struct fsldma_chan *chan)
212{
213 u32 mode;
214 int i;
215
216 /* read the mode register */
217 mode = get_mr(chan);
218
219 /*
220 * The 85xx controller supports channel abort, which will stop
221 * the current transfer. On 83xx, this bit is the transfer error
222 * mask bit, which should not be changed.
223 */
224 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
225 mode |= FSL_DMA_MR_CA;
226 set_mr(chan, mode);
227
228 mode &= ~FSL_DMA_MR_CA;
229 }
230
231 /* stop the DMA controller */
232 mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
233 set_mr(chan, mode);
234
235 /* wait for the DMA controller to become idle */
236 for (i = 0; i < 100; i++) {
237 if (dma_is_idle(chan))
238 return;
239
240 udelay(10);
241 }
242
243 if (!dma_is_idle(chan))
244 chan_err(chan, "DMA halt timeout!\n");
245}
246
247/**
248 * fsl_chan_set_src_loop_size - Set source address hold transfer size
249 * @chan : Freescale DMA channel
250 * @size : Address loop size, 0 for disable loop
251 *
252 * The set source address hold transfer size. The source
253 * address hold or loop transfer size is when the DMA transfer
254 * data from source address (SA), if the loop size is 4, the DMA will
255 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
256 * SA + 1 ... and so on.
257 */
258static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
259{
260 u32 mode;
261
262 mode = get_mr(chan);
263
264 switch (size) {
265 case 0:
266 mode &= ~FSL_DMA_MR_SAHE;
267 break;
268 case 1:
269 case 2:
270 case 4:
271 case 8:
272 mode &= ~FSL_DMA_MR_SAHTS_MASK;
273 mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
274 break;
275 }
276
277 set_mr(chan, mode);
278}
279
280/**
281 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
282 * @chan : Freescale DMA channel
283 * @size : Address loop size, 0 for disable loop
284 *
285 * The set destination address hold transfer size. The destination
286 * address hold or loop transfer size is when the DMA transfer
287 * data to destination address (TA), if the loop size is 4, the DMA will
288 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
289 * TA + 1 ... and so on.
290 */
291static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
292{
293 u32 mode;
294
295 mode = get_mr(chan);
296
297 switch (size) {
298 case 0:
299 mode &= ~FSL_DMA_MR_DAHE;
300 break;
301 case 1:
302 case 2:
303 case 4:
304 case 8:
305 mode &= ~FSL_DMA_MR_DAHTS_MASK;
306 mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
307 break;
308 }
309
310 set_mr(chan, mode);
311}
312
313/**
314 * fsl_chan_set_request_count - Set DMA Request Count for external control
315 * @chan : Freescale DMA channel
316 * @size : Number of bytes to transfer in a single request
317 *
318 * The Freescale DMA channel can be controlled by the external signal DREQ#.
319 * The DMA request count is how many bytes are allowed to transfer before
320 * pausing the channel, after which a new assertion of DREQ# resumes channel
321 * operation.
322 *
323 * A size of 0 disables external pause control. The maximum size is 1024.
324 */
325static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
326{
327 u32 mode;
328
329 BUG_ON(size > 1024);
330
331 mode = get_mr(chan);
332 mode &= ~FSL_DMA_MR_BWC_MASK;
333 mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;
334
335 set_mr(chan, mode);
336}
337
338/**
339 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
340 * @chan : Freescale DMA channel
341 * @enable : 0 is disabled, 1 is enabled.
342 *
343 * The Freescale DMA channel can be controlled by the external signal DREQ#.
344 * The DMA Request Count feature should be used in addition to this feature
345 * to set the number of bytes to transfer before pausing the channel.
346 */
347static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
348{
349 if (enable)
350 chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
351 else
352 chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
353}
354
355/**
356 * fsl_chan_toggle_ext_start - Toggle channel external start status
357 * @chan : Freescale DMA channel
358 * @enable : 0 is disabled, 1 is enabled.
359 *
360 * If enable the external start, the channel can be started by an
361 * external DMA start pin. So the dma_start() does not start the
362 * transfer immediately. The DMA channel will wait for the
363 * control pin asserted.
364 */
365static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
366{
367 if (enable)
368 chan->feature |= FSL_DMA_CHAN_START_EXT;
369 else
370 chan->feature &= ~FSL_DMA_CHAN_START_EXT;
371}
372
373int fsl_dma_external_start(struct dma_chan *dchan, int enable)
374{
375 struct fsldma_chan *chan;
376
377 if (!dchan)
378 return -EINVAL;
379
380 chan = to_fsl_chan(dchan);
381
382 fsl_chan_toggle_ext_start(chan, enable);
383 return 0;
384}
385EXPORT_SYMBOL_GPL(fsl_dma_external_start);
386
387static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
388{
389 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
390
391 if (list_empty(&chan->ld_pending))
392 goto out_splice;
393
394 /*
395 * Add the hardware descriptor to the chain of hardware descriptors
396 * that already exists in memory.
397 *
398 * This will un-set the EOL bit of the existing transaction, and the
399 * last link in this transaction will become the EOL descriptor.
400 */
401 set_desc_next(chan, &tail->hw, desc->async_tx.phys);
402
403 /*
404 * Add the software descriptor and all children to the list
405 * of pending transactions
406 */
407out_splice:
408 list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
409}
410
411static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
412{
413 struct fsldma_chan *chan = to_fsl_chan(tx->chan);
414 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
415 struct fsl_desc_sw *child;
416 dma_cookie_t cookie = -EINVAL;
417
418 spin_lock_bh(&chan->desc_lock);
419
420#ifdef CONFIG_PM
421 if (unlikely(chan->pm_state != RUNNING)) {
422 chan_dbg(chan, "cannot submit due to suspend\n");
423 spin_unlock_bh(&chan->desc_lock);
424 return -1;
425 }
426#endif
427
428 /*
429 * assign cookies to all of the software descriptors
430 * that make up this transaction
431 */
432 list_for_each_entry(child, &desc->tx_list, node) {
433 cookie = dma_cookie_assign(&child->async_tx);
434 }
435
436 /* put this transaction onto the tail of the pending queue */
437 append_ld_queue(chan, desc);
438
439 spin_unlock_bh(&chan->desc_lock);
440
441 return cookie;
442}
443
444/**
445 * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
446 * @chan : Freescale DMA channel
447 * @desc: descriptor to be freed
448 */
449static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
450 struct fsl_desc_sw *desc)
451{
452 list_del(&desc->node);
453 chan_dbg(chan, "LD %p free\n", desc);
454 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
455}
456
457/**
458 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
459 * @chan : Freescale DMA channel
460 *
461 * Return - The descriptor allocated. NULL for failed.
462 */
463static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
464{
465 struct fsl_desc_sw *desc;
466 dma_addr_t pdesc;
467
468 desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
469 if (!desc) {
470 chan_dbg(chan, "out of memory for link descriptor\n");
471 return NULL;
472 }
473
474 INIT_LIST_HEAD(&desc->tx_list);
475 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
476 desc->async_tx.tx_submit = fsl_dma_tx_submit;
477 desc->async_tx.phys = pdesc;
478
479 chan_dbg(chan, "LD %p allocated\n", desc);
480
481 return desc;
482}
483
484/**
485 * fsldma_clean_completed_descriptor - free all descriptors which
486 * has been completed and acked
487 * @chan: Freescale DMA channel
488 *
489 * This function is used on all completed and acked descriptors.
490 * All descriptors should only be freed in this function.
491 */
492static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
493{
494 struct fsl_desc_sw *desc, *_desc;
495
496 /* Run the callback for each descriptor, in order */
497 list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
498 if (async_tx_test_ack(&desc->async_tx))
499 fsl_dma_free_descriptor(chan, desc);
500}
501
502/**
503 * fsldma_run_tx_complete_actions - cleanup a single link descriptor
504 * @chan: Freescale DMA channel
505 * @desc: descriptor to cleanup and free
506 * @cookie: Freescale DMA transaction identifier
507 *
508 * This function is used on a descriptor which has been executed by the DMA
509 * controller. It will run any callbacks, submit any dependencies.
510 */
511static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
512 struct fsl_desc_sw *desc, dma_cookie_t cookie)
513{
514 struct dma_async_tx_descriptor *txd = &desc->async_tx;
515 dma_cookie_t ret = cookie;
516
517 BUG_ON(txd->cookie < 0);
518
519 if (txd->cookie > 0) {
520 ret = txd->cookie;
521
522 dma_descriptor_unmap(txd);
523 /* Run the link descriptor callback function */
524 dmaengine_desc_get_callback_invoke(txd, NULL);
525 }
526
527 /* Run any dependencies */
528 dma_run_dependencies(txd);
529
530 return ret;
531}

/**
 * fsldma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: Freescale DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if it has been acked by the async_tx API,
 * otherwise move it to the ld_completed queue.
 */
static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
					    struct fsl_desc_sw *desc)
{
	/* Remove from the list of transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/*
		 * Move this descriptor to the list of descriptors which are
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan: Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * There are pending link descriptors which have not been
	 * transferred, so we need to start the controller. Move all
	 * elements from the queue of pending transactions onto the list
	 * of running transactions.
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);	/* read back to flush the posted write before starting */

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed, where they wait to be freed once 'ack' is set
 * @chan: Freescale DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptors once the 'ack' flag is set.
 */
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	dma_addr_t curr_phys = get_cdar(chan);
	int seen_current = 0;

	fsldma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		/*
		 * do not advance past the current descriptor loaded into the
		 * hardware channel; subsequent descriptors are either in
		 * progress or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (desc->async_tx.phys == curr_phys) {
			seen_current = 1;
			if (!dma_is_idle(chan))
				break;
		}

		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

		fsldma_clean_running_descriptor(chan, desc);
	}

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);

	if (cookie > 0)
		chan->common.completed_cookie = cookie;
}
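
/*
 * Descriptor lifecycle in this driver, as implemented by the helpers above
 * (summary added for clarity; the queue names are the driver's own):
 *
 *	prep_memcpy() ---> tx_submit() ---> xfer_ld_queue() ---> cleanup
 *	 (allocated)       (ld_pending)      (ld_running)
 *
 * On completion, fsldma_cleanup_descriptors() runs callbacks and either
 * frees a descriptor immediately (if already acked) or parks it on
 * ld_completed until the client acks it, at which point
 * fsldma_clean_completed_descriptor() finally frees it.
 */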

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan: Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan: Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	spin_unlock_bh(&chan->desc_lock);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
		    dma_addr_t dma_dst, dma_addr_t dma_src,
		    size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor into the LD list */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
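
/*
 * Worked example (not executed anywhere; numbers are illustrative): each
 * hardware link descriptor can move at most FSL_DMA_BCR_MAX_CNT bytes, so
 * fsl_dma_prep_memcpy() chains descriptors to cover larger requests.
 * Assuming FSL_DMA_BCR_MAX_CNT is 0x03ffffff (64 MiB - 1), as defined in
 * fsldma.h, a 100 MiB copy becomes:
 *
 *	link 0: copy = 64 MiB - 1 bytes		(36 MiB + 1 byte remaining)
 *	link 1: copy = 36 MiB + 1 bytes		(len reaches 0, loop exits)
 *
 * Only the final descriptor gets the end-of-link (EOL) marker and the
 * client-controlled 'ack' flags; the intermediate links are pre-acked so
 * the cleanup path can free them without client involvement.
 */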

static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);

	/* Halt the DMA engine */
	dma_halt(chan);

	/* Remove and free all of the descriptors in the LD queue */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	chan->idle = true;

	spin_unlock_bh(&chan->desc_lock);
	return 0;
}

static int fsl_dma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct fsldma_chan *chan;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	/* make sure the channel supports setting burst size */
	if (!chan->set_request_count)
		return -ENXIO;

	/* we set the controller burst size depending on direction */
	if (config->direction == DMA_MEM_TO_DEV)
		size = config->dst_addr_width * config->dst_maxburst;
	else
		size = config->src_addr_width * config->src_maxburst;

	chan->set_request_count(chan, size);
	return 0;
}
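
/*
 * Worked example (illustrative only): a peripheral driver configuring this
 * channel for memory-to-device transfers with 4-byte registers and bursts
 * of 8 words would end up requesting 4 * 8 = 32 bytes per DMA request:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(dchan, &cfg);	// calls fsl_dma_device_config()
 */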

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan: Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_bh(&chan->desc_lock);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan: Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	spin_unlock_bh(&chan->desc_lock);

	return dma_cookie_status(dchan, cookie, txstate);
}
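
/*
 * Usage sketch (hypothetical client code, error handling elided): after
 * submitting and issuing a transfer, a client can poll its cookie; the
 * second dma_cookie_status() call above picks up any state advanced by
 * fsldma_cleanup_descriptors():
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(dchan, cookie, &state);
 *	if (status == DMA_COMPLETE)
 *		;	// transfer finished, buffers may be reclaimed
 */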

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end of the transfer list,
	 * we should clear the Channel Start bit to prepare for the
	 * next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_bh(&chan->desc_lock);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Run all cleanup for descriptors which have been completed */
	fsldma_cleanup_descriptors(chan);

	spin_unlock_bh(&chan->desc_lock);

	chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}
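
/*
 * Layout assumed by the GSR walk above: each channel owns one byte of the
 * 32-bit general status register, with channel 0 in the most significant
 * byte. For example, gsr = 0x00820000 means channel 1 (0x82) has status
 * bits pending while channels 0, 2 and 3 are quiet, so only channel 1's
 * per-channel handler is invoked. (Example value is illustrative.)
 */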

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	/*
	 * channel i's IRQ was never successfully requested, so only
	 * unwind the channels before it
	 */
	for (i--; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int fsl_dma_chan_probe(struct fsldma_device *fdev,
			      struct device_node *node, u32 feature,
			      const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = (res.start & 0xfff) < 0x300 ?
		   ((res.start - 0x100) & 0xfff) >> 7 :
		   ((res.start - 0x200) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx also gets the 83xx callbacks below */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	chan->idle = true;
#ifdef CONFIG_PM
	chan->pm_state = RUNNING;
#endif

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free;
	}

	/* map the channel IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_config = fsl_dma_device_config;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &op->dev;

	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
					   FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
					   "fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
					   FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
					   "fsl,elo-dma-channel");
		}
	}

	/*
	 * Hook up the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts, to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	iounmap(fdev->regs);
out_free:
	kfree(fdev);
out_return:
	return err;
}
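
/*
 * Device-tree sketch (illustrative; adapted from typical MPC85xx trees, so
 * the addresses and interrupt specifiers are assumptions, not a reference):
 * the probe above binds to a controller node and walks its channel children:
 *
 *	dma@21300 {
 *		compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
 *		reg = <0x21300 0x4>;
 *		ranges = <0x0 0x21100 0x200>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		dma-channel@0 {
 *			compatible = "fsl,mpc8548-dma-channel",
 *				     "fsl,eloplus-dma-channel";
 *			reg = <0x0 0x80>;
 *			interrupts = <20 2>;
 *		};
 *	};
 */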

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

#ifdef CONFIG_PM
static int fsldma_suspend_late(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fsldma_device *fdev = platform_get_drvdata(pdev);
	struct fsldma_chan *chan;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		if (unlikely(!chan->idle))
			goto out;
		chan->regs_save.mr = get_mr(chan);
		chan->pm_state = SUSPENDED;
		spin_unlock_bh(&chan->desc_lock);
	}
	return 0;

out:
	/*
	 * Only the busy channel's lock is still held; the earlier channels
	 * were already unlocked above, so just restore their pm_state.
	 */
	spin_unlock_bh(&chan->desc_lock);

	for (i--; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;
		spin_lock_bh(&chan->desc_lock);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}
	return -EBUSY;
}

static int fsldma_resume_early(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fsldma_device *fdev = platform_get_drvdata(pdev);
	struct fsldma_chan *chan;
	u32 mode;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		mode = chan->regs_save.mr
			& ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
		set_mr(chan, mode);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}

	return 0;
}

static const struct dev_pm_ops fsldma_pm_ops = {
	.suspend_late = fsldma_suspend_late,
	.resume_early = fsldma_resume_early,
};
#endif

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
		.pm = &fsldma_pm_ops,
#endif
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static int __init fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");