Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Core driver for the Synopsys DesignWare DMA Controller
4 *
5 * Copyright (C) 2007-2008 Atmel Corporation
6 * Copyright (C) 2010-2011 ST Microelectronics
7 * Copyright (C) 2013 Intel Corporation
8 */
9
10#include <linux/bitops.h>
11#include <linux/delay.h>
12#include <linux/dmaengine.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmapool.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/mm.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/pm_runtime.h>
23
24#include "../dmaengine.h"
25#include "internal.h"
26
27/*
28 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
29 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
30 * of which use ARM any more). See the "Databook" from Synopsys for
31 * information beyond what licensees probably provide.
32 */
33
34/* The set of bus widths supported by the DMA controller */
35#define DW_DMA_BUSWIDTHS \
36 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
37 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
38 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
39 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
40
41/*----------------------------------------------------------------------*/
42
43static struct device *chan2dev(struct dma_chan *chan)
44{
45 return &chan->dev->device;
46}
47
48static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
49{
50 return to_dw_desc(dwc->active_list.next);
51}
52
53static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
54{
55 struct dw_desc *desc = txd_to_dw_desc(tx);
56 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
57 dma_cookie_t cookie;
58 unsigned long flags;
59
60 spin_lock_irqsave(&dwc->lock, flags);
61 cookie = dma_cookie_assign(tx);
62
63 /*
64 * REVISIT: We should attempt to chain as many descriptors as
65 * possible, perhaps even appending to those already submitted
66 * for DMA. But this is hard to do in a race-free manner.
67 */
68
69 list_add_tail(&desc->desc_node, &dwc->queue);
70 spin_unlock_irqrestore(&dwc->lock, flags);
71 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
72 __func__, desc->txd.cookie);
73
74 return cookie;
75}
76
77static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
78{
79 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
80 struct dw_desc *desc;
81 dma_addr_t phys;
82
83 desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
84 if (!desc)
85 return NULL;
86
87 dwc->descs_allocated++;
88 INIT_LIST_HEAD(&desc->tx_list);
89 dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
90 desc->txd.tx_submit = dwc_tx_submit;
91 desc->txd.flags = DMA_CTRL_ACK;
92 desc->txd.phys = phys;
93 return desc;
94}
95
96static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
97{
98 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
99 struct dw_desc *child, *_next;
100
101 if (unlikely(!desc))
102 return;
103
104 list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
105 list_del(&child->desc_node);
106 dma_pool_free(dw->desc_pool, child, child->txd.phys);
107 dwc->descs_allocated--;
108 }
109
110 dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
111 dwc->descs_allocated--;
112}
113
114static void dwc_initialize(struct dw_dma_chan *dwc)
115{
116 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
117
118 dw->initialize_chan(dwc);
119
120 /* Enable interrupts */
121 channel_set_bit(dw, MASK.XFER, dwc->mask);
122 channel_set_bit(dw, MASK.ERROR, dwc->mask);
123}
124
125/*----------------------------------------------------------------------*/
126
127static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
128{
129 dev_err(chan2dev(&dwc->chan),
130 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
131 channel_readl(dwc, SAR),
132 channel_readl(dwc, DAR),
133 channel_readl(dwc, LLP),
134 channel_readl(dwc, CTL_HI),
135 channel_readl(dwc, CTL_LO));
136}
137
138static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
139{
140 channel_clear_bit(dw, CH_EN, dwc->mask);
141 while (dma_readl(dw, CH_EN) & dwc->mask)
142 cpu_relax();
143}
144
145/*----------------------------------------------------------------------*/
146
147/* Perform single block transfer */
148static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
149 struct dw_desc *desc)
150{
151 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
152 u32 ctllo;
153
154 /*
155 * Software emulation of LLP mode relies on interrupts to continue
156 * multi block transfer.
157 */
158 ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
159
160 channel_writel(dwc, SAR, lli_read(desc, sar));
161 channel_writel(dwc, DAR, lli_read(desc, dar));
162 channel_writel(dwc, CTL_LO, ctllo);
163 channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
164 channel_set_bit(dw, CH_EN, dwc->mask);
165
166 /* Move pointer to next descriptor */
167 dwc->tx_node_active = dwc->tx_node_active->next;
168}
169
170/* Called with dwc->lock held and bh disabled */
171static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
172{
173 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
174 u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
175 unsigned long was_soft_llp;
176
177 /* ASSERT: channel is idle */
178 if (dma_readl(dw, CH_EN) & dwc->mask) {
179 dev_err(chan2dev(&dwc->chan),
180 "%s: BUG: Attempted to start non-idle channel\n",
181 __func__);
182 dwc_dump_chan_regs(dwc);
183
184 /* The tasklet will hopefully advance the queue... */
185 return;
186 }
187
188 if (dwc->nollp) {
189 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
190 &dwc->flags);
191 if (was_soft_llp) {
192 dev_err(chan2dev(&dwc->chan),
193 "BUG: Attempted to start new LLP transfer inside ongoing one\n");
194 return;
195 }
196
197 dwc_initialize(dwc);
198
199 first->residue = first->total_len;
200 dwc->tx_node_active = &first->tx_list;
201
202 /* Submit first block */
203 dwc_do_single_block(dwc, first);
204
205 return;
206 }
207
208 dwc_initialize(dwc);
209
210 channel_writel(dwc, LLP, first->txd.phys | lms);
211 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
212 channel_writel(dwc, CTL_HI, 0);
213 channel_set_bit(dw, CH_EN, dwc->mask);
214}
215
216static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
217{
218 struct dw_desc *desc;
219
220 if (list_empty(&dwc->queue))
221 return;
222
223 list_move(dwc->queue.next, &dwc->active_list);
224 desc = dwc_first_active(dwc);
225 dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
226 dwc_dostart(dwc, desc);
227}
228
229/*----------------------------------------------------------------------*/
230
231static void
232dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
233 bool callback_required)
234{
235 struct dma_async_tx_descriptor *txd = &desc->txd;
236 struct dw_desc *child;
237 unsigned long flags;
238 struct dmaengine_desc_callback cb;
239
240 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
241
242 spin_lock_irqsave(&dwc->lock, flags);
243 dma_cookie_complete(txd);
244 if (callback_required)
245 dmaengine_desc_get_callback(txd, &cb);
246 else
247 memset(&cb, 0, sizeof(cb));
248
249 /* async_tx_ack */
250 list_for_each_entry(child, &desc->tx_list, desc_node)
251 async_tx_ack(&child->txd);
252 async_tx_ack(&desc->txd);
253 dwc_desc_put(dwc, desc);
254 spin_unlock_irqrestore(&dwc->lock, flags);
255
256 dmaengine_desc_callback_invoke(&cb, NULL);
257}
258
259static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
260{
261 struct dw_desc *desc, *_desc;
262 LIST_HEAD(list);
263 unsigned long flags;
264
265 spin_lock_irqsave(&dwc->lock, flags);
266 if (dma_readl(dw, CH_EN) & dwc->mask) {
267 dev_err(chan2dev(&dwc->chan),
268 "BUG: XFER bit set, but channel not idle!\n");
269
270 /* Try to continue after resetting the channel... */
271 dwc_chan_disable(dw, dwc);
272 }
273
274 /*
275 * Submit queued descriptors ASAP, i.e. before we go through
276 * the completed ones.
277 */
278 list_splice_init(&dwc->active_list, &list);
279 dwc_dostart_first_queued(dwc);
280
281 spin_unlock_irqrestore(&dwc->lock, flags);
282
283 list_for_each_entry_safe(desc, _desc, &list, desc_node)
284 dwc_descriptor_complete(dwc, desc, true);
285}
286
287/* Returns how many bytes were already received from source */
288static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
289{
290 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
291 u32 ctlhi = channel_readl(dwc, CTL_HI);
292 u32 ctllo = channel_readl(dwc, CTL_LO);
293
294 return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
295}
296
297static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
298{
299 dma_addr_t llp;
300 struct dw_desc *desc, *_desc;
301 struct dw_desc *child;
302 u32 status_xfer;
303 unsigned long flags;
304
305 spin_lock_irqsave(&dwc->lock, flags);
306 llp = channel_readl(dwc, LLP);
307 status_xfer = dma_readl(dw, RAW.XFER);
308
309 if (status_xfer & dwc->mask) {
310 /* Everything we've submitted is done */
311 dma_writel(dw, CLEAR.XFER, dwc->mask);
312
313 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
314 struct list_head *head, *active = dwc->tx_node_active;
315
316 /*
317 * We are inside first active descriptor.
318 * Otherwise something is really wrong.
319 */
320 desc = dwc_first_active(dwc);
321
322 head = &desc->tx_list;
323 if (active != head) {
324 /* Update residue to reflect last sent descriptor */
325 if (active == head->next)
326 desc->residue -= desc->len;
327 else
328 desc->residue -= to_dw_desc(active->prev)->len;
329
330 child = to_dw_desc(active);
331
332 /* Submit next block */
333 dwc_do_single_block(dwc, child);
334
335 spin_unlock_irqrestore(&dwc->lock, flags);
336 return;
337 }
338
339 /* We are done here */
340 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
341 }
342
343 spin_unlock_irqrestore(&dwc->lock, flags);
344
345 dwc_complete_all(dw, dwc);
346 return;
347 }
348
349 if (list_empty(&dwc->active_list)) {
350 spin_unlock_irqrestore(&dwc->lock, flags);
351 return;
352 }
353
354 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
355 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
356 spin_unlock_irqrestore(&dwc->lock, flags);
357 return;
358 }
359
360 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
361
362 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
363 /* Initial residue value */
364 desc->residue = desc->total_len;
365
366 /* Check first descriptors addr */
367 if (desc->txd.phys == DWC_LLP_LOC(llp)) {
368 spin_unlock_irqrestore(&dwc->lock, flags);
369 return;
370 }
371
372 /* Check first descriptors llp */
373 if (lli_read(desc, llp) == llp) {
374 /* This one is currently in progress */
375 desc->residue -= dwc_get_sent(dwc);
376 spin_unlock_irqrestore(&dwc->lock, flags);
377 return;
378 }
379
380 desc->residue -= desc->len;
381 list_for_each_entry(child, &desc->tx_list, desc_node) {
382 if (lli_read(child, llp) == llp) {
383 /* Currently in progress */
384 desc->residue -= dwc_get_sent(dwc);
385 spin_unlock_irqrestore(&dwc->lock, flags);
386 return;
387 }
388 desc->residue -= child->len;
389 }
390
391 /*
392 * No descriptors so far seem to be in progress, i.e.
393 * this one must be done.
394 */
395 spin_unlock_irqrestore(&dwc->lock, flags);
396 dwc_descriptor_complete(dwc, desc, true);
397 spin_lock_irqsave(&dwc->lock, flags);
398 }
399
400 dev_err(chan2dev(&dwc->chan),
401 "BUG: All descriptors done, but channel not idle!\n");
402
403 /* Try to continue after resetting the channel... */
404 dwc_chan_disable(dw, dwc);
405
406 dwc_dostart_first_queued(dwc);
407 spin_unlock_irqrestore(&dwc->lock, flags);
408}
409
410static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
411{
412 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
413 lli_read(desc, sar),
414 lli_read(desc, dar),
415 lli_read(desc, llp),
416 lli_read(desc, ctlhi),
417 lli_read(desc, ctllo));
418}
419
420static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
421{
422 struct dw_desc *bad_desc;
423 struct dw_desc *child;
424 unsigned long flags;
425
426 dwc_scan_descriptors(dw, dwc);
427
428 spin_lock_irqsave(&dwc->lock, flags);
429
430 /*
431 * The descriptor currently at the head of the active list is
432 * borked. Since we don't have any way to report errors, we'll
433 * just have to scream loudly and try to carry on.
434 */
435 bad_desc = dwc_first_active(dwc);
436 list_del_init(&bad_desc->desc_node);
437 list_move(dwc->queue.next, dwc->active_list.prev);
438
439 /* Clear the error flag and try to restart the controller */
440 dma_writel(dw, CLEAR.ERROR, dwc->mask);
441 if (!list_empty(&dwc->active_list))
442 dwc_dostart(dwc, dwc_first_active(dwc));
443
444 /*
445 * WARN may seem harsh, but since this only happens
446 * when someone submits a bad physical address in a
447 * descriptor, we should consider ourselves lucky that the
448 * controller flagged an error instead of scribbling over
449 * random memory locations.
450 */
451 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
452 " cookie: %d\n", bad_desc->txd.cookie);
453 dwc_dump_lli(dwc, bad_desc);
454 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
455 dwc_dump_lli(dwc, child);
456
457 spin_unlock_irqrestore(&dwc->lock, flags);
458
459 /* Pretend the descriptor completed successfully */
460 dwc_descriptor_complete(dwc, bad_desc, true);
461}
462
463static void dw_dma_tasklet(struct tasklet_struct *t)
464{
465 struct dw_dma *dw = from_tasklet(dw, t, tasklet);
466 struct dw_dma_chan *dwc;
467 u32 status_xfer;
468 u32 status_err;
469 unsigned int i;
470
471 status_xfer = dma_readl(dw, RAW.XFER);
472 status_err = dma_readl(dw, RAW.ERROR);
473
474 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
475
476 for (i = 0; i < dw->dma.chancnt; i++) {
477 dwc = &dw->chan[i];
478 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
479 dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
480 else if (status_err & (1 << i))
481 dwc_handle_error(dw, dwc);
482 else if (status_xfer & (1 << i))
483 dwc_scan_descriptors(dw, dwc);
484 }
485
486 /* Re-enable interrupts */
487 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
488 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
489}
490
491static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
492{
493 struct dw_dma *dw = dev_id;
494 u32 status;
495
496 /* Check if we have any interrupt from the DMAC which is not in use */
497 if (!dw->in_use)
498 return IRQ_NONE;
499
500 status = dma_readl(dw, STATUS_INT);
501 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
502
503 /* Check if we have any interrupt from the DMAC */
504 if (!status)
505 return IRQ_NONE;
506
507 /*
508 * Just disable the interrupts. We'll turn them back on in the
509 * softirq handler.
510 */
511 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
512 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
513 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
514
515 status = dma_readl(dw, STATUS_INT);
516 if (status) {
517 dev_err(dw->dma.dev,
518 "BUG: Unexpected interrupts pending: 0x%x\n",
519 status);
520
521 /* Try to recover */
522 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
523 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
524 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
525 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
526 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
527 }
528
529 tasklet_schedule(&dw->tasklet);
530
531 return IRQ_HANDLED;
532}
533
534/*----------------------------------------------------------------------*/
535
536static struct dma_async_tx_descriptor *
537dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
538 size_t len, unsigned long flags)
539{
540 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
541 struct dw_dma *dw = to_dw_dma(chan->device);
542 struct dw_desc *desc;
543 struct dw_desc *first;
544 struct dw_desc *prev;
545 size_t xfer_count;
546 size_t offset;
547 u8 m_master = dwc->dws.m_master;
548 unsigned int src_width;
549 unsigned int dst_width;
550 unsigned int data_width = dw->pdata->data_width[m_master];
551 u32 ctllo, ctlhi;
552 u8 lms = DWC_LLP_LMS(m_master);
553
554 dev_vdbg(chan2dev(chan),
555 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
556 &dest, &src, len, flags);
557
558 if (unlikely(!len)) {
559 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
560 return NULL;
561 }
562
563 dwc->direction = DMA_MEM_TO_MEM;
564
565 src_width = dst_width = __ffs(data_width | src | dest | len);
566
567 ctllo = dw->prepare_ctllo(dwc)
568 | DWC_CTLL_DST_WIDTH(dst_width)
569 | DWC_CTLL_SRC_WIDTH(src_width)
570 | DWC_CTLL_DST_INC
571 | DWC_CTLL_SRC_INC
572 | DWC_CTLL_FC_M2M;
573 prev = first = NULL;
574
575 for (offset = 0; offset < len; offset += xfer_count) {
576 desc = dwc_desc_get(dwc);
577 if (!desc)
578 goto err_desc_get;
579
580 ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);
581
582 lli_write(desc, sar, src + offset);
583 lli_write(desc, dar, dest + offset);
584 lli_write(desc, ctllo, ctllo);
585 lli_write(desc, ctlhi, ctlhi);
586 desc->len = xfer_count;
587
588 if (!first) {
589 first = desc;
590 } else {
591 lli_write(prev, llp, desc->txd.phys | lms);
592 list_add_tail(&desc->desc_node, &first->tx_list);
593 }
594 prev = desc;
595 }
596
597 if (flags & DMA_PREP_INTERRUPT)
598 /* Trigger interrupt after last block */
599 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
600
601 prev->lli.llp = 0;
602 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
603 first->txd.flags = flags;
604 first->total_len = len;
605
606 return &first->txd;
607
608err_desc_get:
609 dwc_desc_put(dwc, first);
610 return NULL;
611}
612
613static struct dma_async_tx_descriptor *
614dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
615 unsigned int sg_len, enum dma_transfer_direction direction,
616 unsigned long flags, void *context)
617{
618 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
619 struct dw_dma *dw = to_dw_dma(chan->device);
620 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
621 struct dw_desc *prev;
622 struct dw_desc *first;
623 u32 ctllo, ctlhi;
624 u8 m_master = dwc->dws.m_master;
625 u8 lms = DWC_LLP_LMS(m_master);
626 dma_addr_t reg;
627 unsigned int reg_width;
628 unsigned int mem_width;
629 unsigned int data_width = dw->pdata->data_width[m_master];
630 unsigned int i;
631 struct scatterlist *sg;
632 size_t total_len = 0;
633
634 dev_vdbg(chan2dev(chan), "%s\n", __func__);
635
636 if (unlikely(!is_slave_direction(direction) || !sg_len))
637 return NULL;
638
639 dwc->direction = direction;
640
641 prev = first = NULL;
642
643 switch (direction) {
644 case DMA_MEM_TO_DEV:
645 reg_width = __ffs(sconfig->dst_addr_width);
646 reg = sconfig->dst_addr;
647 ctllo = dw->prepare_ctllo(dwc)
648 | DWC_CTLL_DST_WIDTH(reg_width)
649 | DWC_CTLL_DST_FIX
650 | DWC_CTLL_SRC_INC;
651
652 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
653 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
654
655 for_each_sg(sgl, sg, sg_len, i) {
656 struct dw_desc *desc;
657 u32 len, mem;
658 size_t dlen;
659
660 mem = sg_dma_address(sg);
661 len = sg_dma_len(sg);
662
663 mem_width = __ffs(data_width | mem | len);
664
665slave_sg_todev_fill_desc:
666 desc = dwc_desc_get(dwc);
667 if (!desc)
668 goto err_desc_get;
669
670 ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);
671
672 lli_write(desc, sar, mem);
673 lli_write(desc, dar, reg);
674 lli_write(desc, ctlhi, ctlhi);
675 lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
676 desc->len = dlen;
677
678 if (!first) {
679 first = desc;
680 } else {
681 lli_write(prev, llp, desc->txd.phys | lms);
682 list_add_tail(&desc->desc_node, &first->tx_list);
683 }
684 prev = desc;
685
686 mem += dlen;
687 len -= dlen;
688 total_len += dlen;
689
690 if (len)
691 goto slave_sg_todev_fill_desc;
692 }
693 break;
694 case DMA_DEV_TO_MEM:
695 reg_width = __ffs(sconfig->src_addr_width);
696 reg = sconfig->src_addr;
697 ctllo = dw->prepare_ctllo(dwc)
698 | DWC_CTLL_SRC_WIDTH(reg_width)
699 | DWC_CTLL_DST_INC
700 | DWC_CTLL_SRC_FIX;
701
702 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
703 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
704
705 for_each_sg(sgl, sg, sg_len, i) {
706 struct dw_desc *desc;
707 u32 len, mem;
708 size_t dlen;
709
710 mem = sg_dma_address(sg);
711 len = sg_dma_len(sg);
712
713slave_sg_fromdev_fill_desc:
714 desc = dwc_desc_get(dwc);
715 if (!desc)
716 goto err_desc_get;
717
718 ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);
719
720 lli_write(desc, sar, reg);
721 lli_write(desc, dar, mem);
722 lli_write(desc, ctlhi, ctlhi);
723 mem_width = __ffs(data_width | mem);
724 lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
725 desc->len = dlen;
726
727 if (!first) {
728 first = desc;
729 } else {
730 lli_write(prev, llp, desc->txd.phys | lms);
731 list_add_tail(&desc->desc_node, &first->tx_list);
732 }
733 prev = desc;
734
735 mem += dlen;
736 len -= dlen;
737 total_len += dlen;
738
739 if (len)
740 goto slave_sg_fromdev_fill_desc;
741 }
742 break;
743 default:
744 return NULL;
745 }
746
747 if (flags & DMA_PREP_INTERRUPT)
748 /* Trigger interrupt after last block */
749 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
750
751 prev->lli.llp = 0;
752 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
753 first->total_len = total_len;
754
755 return &first->txd;
756
757err_desc_get:
758 dev_err(chan2dev(chan),
759 "not enough descriptors available. Direction %d\n", direction);
760 dwc_desc_put(dwc, first);
761 return NULL;
762}
763
764bool dw_dma_filter(struct dma_chan *chan, void *param)
765{
766 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
767 struct dw_dma_slave *dws = param;
768
769 if (dws->dma_dev != chan->device->dev)
770 return false;
771
772 /* permit channels in accordance with the channels mask */
773 if (dws->channels && !(dws->channels & dwc->mask))
774 return false;
775
776 /* We have to copy data since dws can be temporary storage */
777 memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
778
779 return true;
780}
781EXPORT_SYMBOL_GPL(dw_dma_filter);
782
783static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
784{
785 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
786 struct dw_dma *dw = to_dw_dma(chan->device);
787
788 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
789
790 dwc->dma_sconfig.src_maxburst =
791 clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
792 dwc->dma_sconfig.dst_maxburst =
793 clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
794
795 dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
796 dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
797
798 return 0;
799}
800
801static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
802{
803 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
804 unsigned int count = 20; /* timeout iterations */
805
806 dw->suspend_chan(dwc, drain);
807
808 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
809 udelay(2);
810
811 set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
812}
813
814static int dwc_pause(struct dma_chan *chan)
815{
816 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
817 unsigned long flags;
818
819 spin_lock_irqsave(&dwc->lock, flags);
820 dwc_chan_pause(dwc, false);
821 spin_unlock_irqrestore(&dwc->lock, flags);
822
823 return 0;
824}
825
826static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
827{
828 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
829
830 dw->resume_chan(dwc, drain);
831
832 clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
833}
834
835static int dwc_resume(struct dma_chan *chan)
836{
837 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
838 unsigned long flags;
839
840 spin_lock_irqsave(&dwc->lock, flags);
841
842 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
843 dwc_chan_resume(dwc, false);
844
845 spin_unlock_irqrestore(&dwc->lock, flags);
846
847 return 0;
848}
849
850static int dwc_terminate_all(struct dma_chan *chan)
851{
852 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
853 struct dw_dma *dw = to_dw_dma(chan->device);
854 struct dw_desc *desc, *_desc;
855 unsigned long flags;
856 LIST_HEAD(list);
857
858 spin_lock_irqsave(&dwc->lock, flags);
859
860 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
861
862 dwc_chan_pause(dwc, true);
863
864 dwc_chan_disable(dw, dwc);
865
866 dwc_chan_resume(dwc, true);
867
868 /* active_list entries will end up before queued entries */
869 list_splice_init(&dwc->queue, &list);
870 list_splice_init(&dwc->active_list, &list);
871
872 spin_unlock_irqrestore(&dwc->lock, flags);
873
874 /* Flush all pending and queued descriptors */
875 list_for_each_entry_safe(desc, _desc, &list, desc_node)
876 dwc_descriptor_complete(dwc, desc, false);
877
878 return 0;
879}
880
881static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
882{
883 struct dw_desc *desc;
884
885 list_for_each_entry(desc, &dwc->active_list, desc_node)
886 if (desc->txd.cookie == c)
887 return desc;
888
889 return NULL;
890}
891
892static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie,
893 enum dma_status *status)
894{
895 struct dw_desc *desc;
896 unsigned long flags;
897 u32 residue;
898
899 spin_lock_irqsave(&dwc->lock, flags);
900
901 desc = dwc_find_desc(dwc, cookie);
902 if (desc) {
903 if (desc == dwc_first_active(dwc)) {
904 residue = desc->residue;
905 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
906 residue -= dwc_get_sent(dwc);
907 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
908 *status = DMA_PAUSED;
909 } else {
910 residue = desc->total_len;
911 }
912 } else {
913 residue = 0;
914 }
915
916 spin_unlock_irqrestore(&dwc->lock, flags);
917 return residue;
918}
919
920static enum dma_status
921dwc_tx_status(struct dma_chan *chan,
922 dma_cookie_t cookie,
923 struct dma_tx_state *txstate)
924{
925 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
926 enum dma_status ret;
927
928 ret = dma_cookie_status(chan, cookie, txstate);
929 if (ret == DMA_COMPLETE)
930 return ret;
931
932 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
933
934 ret = dma_cookie_status(chan, cookie, txstate);
935 if (ret == DMA_COMPLETE)
936 return ret;
937
938 dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret));
939 return ret;
940}
941
942static void dwc_issue_pending(struct dma_chan *chan)
943{
944 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
945 unsigned long flags;
946
947 spin_lock_irqsave(&dwc->lock, flags);
948 if (list_empty(&dwc->active_list))
949 dwc_dostart_first_queued(dwc);
950 spin_unlock_irqrestore(&dwc->lock, flags);
951}
952
953/*----------------------------------------------------------------------*/
954
955void do_dw_dma_off(struct dw_dma *dw)
956{
957 dma_writel(dw, CFG, 0);
958
959 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
960 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
961 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
962 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
963 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
964
965 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
966 cpu_relax();
967}
968
969void do_dw_dma_on(struct dw_dma *dw)
970{
971 dma_writel(dw, CFG, DW_CFG_DMA_EN);
972}
973
974static int dwc_alloc_chan_resources(struct dma_chan *chan)
975{
976 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
977 struct dw_dma *dw = to_dw_dma(chan->device);
978
979 dev_vdbg(chan2dev(chan), "%s\n", __func__);
980
981 /* ASSERT: channel is idle */
982 if (dma_readl(dw, CH_EN) & dwc->mask) {
983 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
984 return -EIO;
985 }
986
987 dma_cookie_init(chan);
988
989 /*
990 * NOTE: some controllers may have additional features that we
991 * need to initialize here, like "scatter-gather" (which
992 * doesn't mean what you think it means), and status writeback.
993 */
994
995 /*
996 * We need controller-specific data to set up slave transfers.
997 */
998 if (chan->private && !dw_dma_filter(chan, chan->private)) {
999 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1000 return -EINVAL;
1001 }
1002
1003 /* Enable controller here if needed */
1004 if (!dw->in_use)
1005 do_dw_dma_on(dw);
1006 dw->in_use |= dwc->mask;
1007
1008 return 0;
1009}
1010
1011static void dwc_free_chan_resources(struct dma_chan *chan)
1012{
1013 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1014 struct dw_dma *dw = to_dw_dma(chan->device);
1015 unsigned long flags;
1016
1017 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1018 dwc->descs_allocated);
1019
1020 /* ASSERT: channel is idle */
1021 BUG_ON(!list_empty(&dwc->active_list));
1022 BUG_ON(!list_empty(&dwc->queue));
1023 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1024
1025 spin_lock_irqsave(&dwc->lock, flags);
1026
1027 /* Clear custom channel configuration */
1028 memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
1029
1030 /* Disable interrupts */
1031 channel_clear_bit(dw, MASK.XFER, dwc->mask);
1032 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1033 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1034
1035 spin_unlock_irqrestore(&dwc->lock, flags);
1036
1037 /* Disable controller in case it was a last user */
1038 dw->in_use &= ~dwc->mask;
1039 if (!dw->in_use)
1040 do_dw_dma_off(dw);
1041
1042 dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1043}
1044
1045static void dwc_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
1046{
1047 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1048
1049 caps->max_burst = dwc->max_burst;
1050
1051 /*
1052 * It might be crucial for some devices to have the hardware
1053 * accelerated multi-block transfers supported, aka LLPs in DW DMAC
1054 * notation. So if LLPs are supported then max_sg_burst is set to
1055 * zero which means unlimited number of SG entries can be handled in a
1056 * single DMA transaction, otherwise it's just one SG entry.
1057 */
1058 if (dwc->nollp)
1059 caps->max_sg_burst = 1;
1060 else
1061 caps->max_sg_burst = 0;
1062}
1063
1064int do_dma_probe(struct dw_dma_chip *chip)
1065{
1066 struct dw_dma *dw = chip->dw;
1067 struct dw_dma_platform_data *pdata;
1068 bool autocfg = false;
1069 unsigned int dw_params;
1070 unsigned int i;
1071 int err;
1072
1073 dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
1074 if (!dw->pdata)
1075 return -ENOMEM;
1076
1077 dw->regs = chip->regs;
1078
1079 pm_runtime_get_sync(chip->dev);
1080
1081 if (!chip->pdata) {
1082 dw_params = dma_readl(dw, DW_PARAMS);
1083 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1084
1085 autocfg = dw_params >> DW_PARAMS_EN & 1;
1086 if (!autocfg) {
1087 err = -EINVAL;
1088 goto err_pdata;
1089 }
1090
1091 /* Reassign the platform data pointer */
1092 pdata = dw->pdata;
1093
1094 /* Get hardware configuration parameters */
1095 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1096 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1097 for (i = 0; i < pdata->nr_masters; i++) {
1098 pdata->data_width[i] =
1099 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
1100 }
1101 pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
1102
1103 /* Fill platform data with the default values */
1104 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1105 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1106 } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
1107 err = -EINVAL;
1108 goto err_pdata;
1109 } else {
1110 memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
1111
1112 /* Reassign the platform data pointer */
1113 pdata = dw->pdata;
1114 }
1115
1116 dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
1117 GFP_KERNEL);
1118 if (!dw->chan) {
1119 err = -ENOMEM;
1120 goto err_pdata;
1121 }
1122
1123 /* Calculate all channel mask before DMA setup */
1124 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1125
1126 /* Force dma off, just in case */
1127 dw->disable(dw);
1128
1129 /* Device and instance ID for IRQ and DMA pool */
1130 dw->set_device_name(dw, chip->id);
1131
1132 /* Create a pool of consistent memory blocks for hardware descriptors */
1133 dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
1134 sizeof(struct dw_desc), 4, 0);
1135 if (!dw->desc_pool) {
1136 dev_err(chip->dev, "No memory for descriptors dma pool\n");
1137 err = -ENOMEM;
1138 goto err_pdata;
1139 }
1140
1141 tasklet_setup(&dw->tasklet, dw_dma_tasklet);
1142
1143 err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1144 dw->name, dw);
1145 if (err)
1146 goto err_pdata;
1147
1148 INIT_LIST_HEAD(&dw->dma.channels);
1149 for (i = 0; i < pdata->nr_channels; i++) {
1150 struct dw_dma_chan *dwc = &dw->chan[i];
1151
1152 dwc->chan.device = &dw->dma;
1153 dma_cookie_init(&dwc->chan);
1154 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1155 list_add_tail(&dwc->chan.device_node,
1156 &dw->dma.channels);
1157 else
1158 list_add(&dwc->chan.device_node, &dw->dma.channels);
1159
1160 /* 7 is highest priority & 0 is lowest. */
1161 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1162 dwc->priority = pdata->nr_channels - i - 1;
1163 else
1164 dwc->priority = i;
1165
1166 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1167 spin_lock_init(&dwc->lock);
1168 dwc->mask = 1 << i;
1169
1170 INIT_LIST_HEAD(&dwc->active_list);
1171 INIT_LIST_HEAD(&dwc->queue);
1172
1173 channel_clear_bit(dw, CH_EN, dwc->mask);
1174
1175 dwc->direction = DMA_TRANS_NONE;
1176
1177 /* Hardware configuration */
1178 if (autocfg) {
1179 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
1180 void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
1181 unsigned int dwc_params = readl(addr);
1182
1183 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1184 dwc_params);
1185
1186 /*
1187 * Decode maximum block size for given channel. The
1188 * stored 4 bit value represents blocks from 0x00 for 3
1189 * up to 0x0a for 4095.
1190 */
1191 dwc->block_size =
1192 (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
1193
1194 /*
1195 * According to the DW DMA databook the true scatter-
1196 * gether LLPs aren't available if either multi-block
1197 * config is disabled (CHx_MULTI_BLK_EN == 0) or the
1198 * LLP register is hard-coded to zeros
1199 * (CHx_HC_LLP == 1).
1200 */
1201 dwc->nollp =
1202 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 ||
1203 (dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1;
1204 dwc->max_burst =
1205 (0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7));
1206 } else {
1207 dwc->block_size = pdata->block_size;
1208 dwc->nollp = !pdata->multi_block[i];
1209 dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST;
1210 }
1211 }
1212
1213 /* Clear all interrupts on all channels. */
1214 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1215 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1216 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1217 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1218 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1219
1220 /* Set capabilities */
1221 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1222 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1223 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1224
1225 dw->dma.dev = chip->dev;
1226 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1227 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1228
1229 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1230 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1231
1232 dw->dma.device_caps = dwc_caps;
1233 dw->dma.device_config = dwc_config;
1234 dw->dma.device_pause = dwc_pause;
1235 dw->dma.device_resume = dwc_resume;
1236 dw->dma.device_terminate_all = dwc_terminate_all;
1237
1238 dw->dma.device_tx_status = dwc_tx_status;
1239 dw->dma.device_issue_pending = dwc_issue_pending;
1240
1241 /* DMA capabilities */
1242 dw->dma.min_burst = DW_DMA_MIN_BURST;
1243 dw->dma.max_burst = DW_DMA_MAX_BURST;
1244 dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1245 dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1246 dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1247 BIT(DMA_MEM_TO_MEM);
1248 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1249
1250 /*
1251 * For now there is no hardware with non uniform maximum block size
1252 * across all of the device channels, so we set the maximum segment
1253 * size as the block size found for the very first channel.
1254 */
1255 dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size);
1256
1257 err = dma_async_device_register(&dw->dma);
1258 if (err)
1259 goto err_dma_register;
1260
1261 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
1262 pdata->nr_channels);
1263
1264 pm_runtime_put_sync_suspend(chip->dev);
1265
1266 return 0;
1267
1268err_dma_register:
1269 free_irq(chip->irq, dw);
1270err_pdata:
1271 pm_runtime_put_sync_suspend(chip->dev);
1272 return err;
1273}
1274
1275int do_dma_remove(struct dw_dma_chip *chip)
1276{
1277 struct dw_dma *dw = chip->dw;
1278 struct dw_dma_chan *dwc, *_dwc;
1279
1280 pm_runtime_get_sync(chip->dev);
1281
1282 do_dw_dma_off(dw);
1283 dma_async_device_unregister(&dw->dma);
1284
1285 free_irq(chip->irq, dw);
1286 tasklet_kill(&dw->tasklet);
1287
1288 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1289 chan.device_node) {
1290 list_del(&dwc->chan.device_node);
1291 channel_clear_bit(dw, CH_EN, dwc->mask);
1292 }
1293
1294 pm_runtime_put_sync_suspend(chip->dev);
1295 return 0;
1296}
1297
1298int do_dw_dma_disable(struct dw_dma_chip *chip)
1299{
1300 struct dw_dma *dw = chip->dw;
1301
1302 dw->disable(dw);
1303 return 0;
1304}
1305EXPORT_SYMBOL_GPL(do_dw_dma_disable);
1306
1307int do_dw_dma_enable(struct dw_dma_chip *chip)
1308{
1309 struct dw_dma *dw = chip->dw;
1310
1311 dw->enable(dw);
1312 return 0;
1313}
1314EXPORT_SYMBOL_GPL(do_dw_dma_enable);
1315
1316MODULE_LICENSE("GPL v2");
1317MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1318MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1319MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Core driver for the Synopsys DesignWare DMA Controller
4 *
5 * Copyright (C) 2007-2008 Atmel Corporation
6 * Copyright (C) 2010-2011 ST Microelectronics
7 * Copyright (C) 2013 Intel Corporation
8 */
9
10#include <linux/bitops.h>
11#include <linux/delay.h>
12#include <linux/dmaengine.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmapool.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/log2.h>
20#include <linux/mm.h>
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/pm_runtime.h>
24
25#include "../dmaengine.h"
26#include "internal.h"
27
28/*
29 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
30 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
31 * of which use ARM any more). See the "Databook" from Synopsys for
32 * information beyond what licensees probably provide.
33 */
34
35/* The set of bus widths supported by the DMA controller */
36#define DW_DMA_BUSWIDTHS \
37 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
38 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
39 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
40 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
41
42/*----------------------------------------------------------------------*/
43
44static struct device *chan2dev(struct dma_chan *chan)
45{
46 return &chan->dev->device;
47}
48
49static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
50{
51 return to_dw_desc(dwc->active_list.next);
52}
53
54static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
55{
56 struct dw_desc *desc = txd_to_dw_desc(tx);
57 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
58 dma_cookie_t cookie;
59 unsigned long flags;
60
61 spin_lock_irqsave(&dwc->lock, flags);
62 cookie = dma_cookie_assign(tx);
63
64 /*
65 * REVISIT: We should attempt to chain as many descriptors as
66 * possible, perhaps even appending to those already submitted
67 * for DMA. But this is hard to do in a race-free manner.
68 */
69
70 list_add_tail(&desc->desc_node, &dwc->queue);
71 spin_unlock_irqrestore(&dwc->lock, flags);
72 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
73 __func__, desc->txd.cookie);
74
75 return cookie;
76}
77
78static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
79{
80 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
81 struct dw_desc *desc;
82 dma_addr_t phys;
83
84 desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
85 if (!desc)
86 return NULL;
87
88 dwc->descs_allocated++;
89 INIT_LIST_HEAD(&desc->tx_list);
90 dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
91 desc->txd.tx_submit = dwc_tx_submit;
92 desc->txd.flags = DMA_CTRL_ACK;
93 desc->txd.phys = phys;
94 return desc;
95}
96
97static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
98{
99 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
100 struct dw_desc *child, *_next;
101
102 if (unlikely(!desc))
103 return;
104
105 list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
106 list_del(&child->desc_node);
107 dma_pool_free(dw->desc_pool, child, child->txd.phys);
108 dwc->descs_allocated--;
109 }
110
111 dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
112 dwc->descs_allocated--;
113}
114
115static void dwc_initialize(struct dw_dma_chan *dwc)
116{
117 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
118
119 dw->initialize_chan(dwc);
120
121 /* Enable interrupts */
122 channel_set_bit(dw, MASK.XFER, dwc->mask);
123 channel_set_bit(dw, MASK.ERROR, dwc->mask);
124}
125
126/*----------------------------------------------------------------------*/
127
128static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
129{
130 dev_err(chan2dev(&dwc->chan),
131 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
132 channel_readl(dwc, SAR),
133 channel_readl(dwc, DAR),
134 channel_readl(dwc, LLP),
135 channel_readl(dwc, CTL_HI),
136 channel_readl(dwc, CTL_LO));
137}
138
139static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
140{
141 channel_clear_bit(dw, CH_EN, dwc->mask);
142 while (dma_readl(dw, CH_EN) & dwc->mask)
143 cpu_relax();
144}
145
146/*----------------------------------------------------------------------*/
147
148/* Perform single block transfer */
149static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
150 struct dw_desc *desc)
151{
152 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
153 u32 ctllo;
154
155 /*
156 * Software emulation of LLP mode relies on interrupts to continue
157 * multi block transfer.
158 */
159 ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
160
161 channel_writel(dwc, SAR, lli_read(desc, sar));
162 channel_writel(dwc, DAR, lli_read(desc, dar));
163 channel_writel(dwc, CTL_LO, ctllo);
164 channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
165 channel_set_bit(dw, CH_EN, dwc->mask);
166
167 /* Move pointer to next descriptor */
168 dwc->tx_node_active = dwc->tx_node_active->next;
169}
170
171/* Called with dwc->lock held and bh disabled */
172static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
173{
174 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
175 u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
176 unsigned long was_soft_llp;
177
178 /* ASSERT: channel is idle */
179 if (dma_readl(dw, CH_EN) & dwc->mask) {
180 dev_err(chan2dev(&dwc->chan),
181 "%s: BUG: Attempted to start non-idle channel\n",
182 __func__);
183 dwc_dump_chan_regs(dwc);
184
185 /* The tasklet will hopefully advance the queue... */
186 return;
187 }
188
189 if (dwc->nollp) {
190 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
191 &dwc->flags);
192 if (was_soft_llp) {
193 dev_err(chan2dev(&dwc->chan),
194 "BUG: Attempted to start new LLP transfer inside ongoing one\n");
195 return;
196 }
197
198 dwc_initialize(dwc);
199
200 first->residue = first->total_len;
201 dwc->tx_node_active = &first->tx_list;
202
203 /* Submit first block */
204 dwc_do_single_block(dwc, first);
205
206 return;
207 }
208
209 dwc_initialize(dwc);
210
211 channel_writel(dwc, LLP, first->txd.phys | lms);
212 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
213 channel_writel(dwc, CTL_HI, 0);
214 channel_set_bit(dw, CH_EN, dwc->mask);
215}
216
217static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
218{
219 struct dw_desc *desc;
220
221 if (list_empty(&dwc->queue))
222 return;
223
224 list_move(dwc->queue.next, &dwc->active_list);
225 desc = dwc_first_active(dwc);
226 dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
227 dwc_dostart(dwc, desc);
228}
229
230/*----------------------------------------------------------------------*/
231
232static void
233dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
234 bool callback_required)
235{
236 struct dma_async_tx_descriptor *txd = &desc->txd;
237 struct dw_desc *child;
238 unsigned long flags;
239 struct dmaengine_desc_callback cb;
240
241 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
242
243 spin_lock_irqsave(&dwc->lock, flags);
244 dma_cookie_complete(txd);
245 if (callback_required)
246 dmaengine_desc_get_callback(txd, &cb);
247 else
248 memset(&cb, 0, sizeof(cb));
249
250 /* async_tx_ack */
251 list_for_each_entry(child, &desc->tx_list, desc_node)
252 async_tx_ack(&child->txd);
253 async_tx_ack(&desc->txd);
254 dwc_desc_put(dwc, desc);
255 spin_unlock_irqrestore(&dwc->lock, flags);
256
257 dmaengine_desc_callback_invoke(&cb, NULL);
258}
259
260static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
261{
262 struct dw_desc *desc, *_desc;
263 LIST_HEAD(list);
264 unsigned long flags;
265
266 spin_lock_irqsave(&dwc->lock, flags);
267 if (dma_readl(dw, CH_EN) & dwc->mask) {
268 dev_err(chan2dev(&dwc->chan),
269 "BUG: XFER bit set, but channel not idle!\n");
270
271 /* Try to continue after resetting the channel... */
272 dwc_chan_disable(dw, dwc);
273 }
274
275 /*
276 * Submit queued descriptors ASAP, i.e. before we go through
277 * the completed ones.
278 */
279 list_splice_init(&dwc->active_list, &list);
280 dwc_dostart_first_queued(dwc);
281
282 spin_unlock_irqrestore(&dwc->lock, flags);
283
284 list_for_each_entry_safe(desc, _desc, &list, desc_node)
285 dwc_descriptor_complete(dwc, desc, true);
286}
287
288/* Returns how many bytes were already received from source */
289static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
290{
291 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
292 u32 ctlhi = channel_readl(dwc, CTL_HI);
293 u32 ctllo = channel_readl(dwc, CTL_LO);
294
295 return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
296}
297
298static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
299{
300 dma_addr_t llp;
301 struct dw_desc *desc, *_desc;
302 struct dw_desc *child;
303 u32 status_xfer;
304 unsigned long flags;
305
306 spin_lock_irqsave(&dwc->lock, flags);
307 llp = channel_readl(dwc, LLP);
308 status_xfer = dma_readl(dw, RAW.XFER);
309
310 if (status_xfer & dwc->mask) {
311 /* Everything we've submitted is done */
312 dma_writel(dw, CLEAR.XFER, dwc->mask);
313
314 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
315 struct list_head *head, *active = dwc->tx_node_active;
316
317 /*
318 * We are inside first active descriptor.
319 * Otherwise something is really wrong.
320 */
321 desc = dwc_first_active(dwc);
322
323 head = &desc->tx_list;
324 if (active != head) {
325 /* Update residue to reflect last sent descriptor */
326 if (active == head->next)
327 desc->residue -= desc->len;
328 else
329 desc->residue -= to_dw_desc(active->prev)->len;
330
331 child = to_dw_desc(active);
332
333 /* Submit next block */
334 dwc_do_single_block(dwc, child);
335
336 spin_unlock_irqrestore(&dwc->lock, flags);
337 return;
338 }
339
340 /* We are done here */
341 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
342 }
343
344 spin_unlock_irqrestore(&dwc->lock, flags);
345
346 dwc_complete_all(dw, dwc);
347 return;
348 }
349
350 if (list_empty(&dwc->active_list)) {
351 spin_unlock_irqrestore(&dwc->lock, flags);
352 return;
353 }
354
355 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
356 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
357 spin_unlock_irqrestore(&dwc->lock, flags);
358 return;
359 }
360
361 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
362
363 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
364 /* Initial residue value */
365 desc->residue = desc->total_len;
366
367 /* Check first descriptors addr */
368 if (desc->txd.phys == DWC_LLP_LOC(llp)) {
369 spin_unlock_irqrestore(&dwc->lock, flags);
370 return;
371 }
372
373 /* Check first descriptors llp */
374 if (lli_read(desc, llp) == llp) {
375 /* This one is currently in progress */
376 desc->residue -= dwc_get_sent(dwc);
377 spin_unlock_irqrestore(&dwc->lock, flags);
378 return;
379 }
380
381 desc->residue -= desc->len;
382 list_for_each_entry(child, &desc->tx_list, desc_node) {
383 if (lli_read(child, llp) == llp) {
384 /* Currently in progress */
385 desc->residue -= dwc_get_sent(dwc);
386 spin_unlock_irqrestore(&dwc->lock, flags);
387 return;
388 }
389 desc->residue -= child->len;
390 }
391
392 /*
393 * No descriptors so far seem to be in progress, i.e.
394 * this one must be done.
395 */
396 spin_unlock_irqrestore(&dwc->lock, flags);
397 dwc_descriptor_complete(dwc, desc, true);
398 spin_lock_irqsave(&dwc->lock, flags);
399 }
400
401 dev_err(chan2dev(&dwc->chan),
402 "BUG: All descriptors done, but channel not idle!\n");
403
404 /* Try to continue after resetting the channel... */
405 dwc_chan_disable(dw, dwc);
406
407 dwc_dostart_first_queued(dwc);
408 spin_unlock_irqrestore(&dwc->lock, flags);
409}
410
411static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
412{
413 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
414 lli_read(desc, sar),
415 lli_read(desc, dar),
416 lli_read(desc, llp),
417 lli_read(desc, ctlhi),
418 lli_read(desc, ctllo));
419}
420
421static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
422{
423 struct dw_desc *bad_desc;
424 struct dw_desc *child;
425 unsigned long flags;
426
427 dwc_scan_descriptors(dw, dwc);
428
429 spin_lock_irqsave(&dwc->lock, flags);
430
431 /*
432 * The descriptor currently at the head of the active list is
433 * borked. Since we don't have any way to report errors, we'll
434 * just have to scream loudly and try to carry on.
435 */
436 bad_desc = dwc_first_active(dwc);
437 list_del_init(&bad_desc->desc_node);
438 list_move(dwc->queue.next, dwc->active_list.prev);
439
440 /* Clear the error flag and try to restart the controller */
441 dma_writel(dw, CLEAR.ERROR, dwc->mask);
442 if (!list_empty(&dwc->active_list))
443 dwc_dostart(dwc, dwc_first_active(dwc));
444
445 /*
446 * WARN may seem harsh, but since this only happens
447 * when someone submits a bad physical address in a
448 * descriptor, we should consider ourselves lucky that the
449 * controller flagged an error instead of scribbling over
450 * random memory locations.
451 */
452 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
453 " cookie: %d\n", bad_desc->txd.cookie);
454 dwc_dump_lli(dwc, bad_desc);
455 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
456 dwc_dump_lli(dwc, child);
457
458 spin_unlock_irqrestore(&dwc->lock, flags);
459
460 /* Pretend the descriptor completed successfully */
461 dwc_descriptor_complete(dwc, bad_desc, true);
462}
463
464static void dw_dma_tasklet(struct tasklet_struct *t)
465{
466 struct dw_dma *dw = from_tasklet(dw, t, tasklet);
467 struct dw_dma_chan *dwc;
468 u32 status_xfer;
469 u32 status_err;
470 unsigned int i;
471
472 status_xfer = dma_readl(dw, RAW.XFER);
473 status_err = dma_readl(dw, RAW.ERROR);
474
475 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
476
477 for (i = 0; i < dw->dma.chancnt; i++) {
478 dwc = &dw->chan[i];
479 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
480 dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
481 else if (status_err & (1 << i))
482 dwc_handle_error(dw, dwc);
483 else if (status_xfer & (1 << i))
484 dwc_scan_descriptors(dw, dwc);
485 }
486
487 /* Re-enable interrupts */
488 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
489 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
490}
491
492static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
493{
494 struct dw_dma *dw = dev_id;
495 u32 status;
496
497 /* Check if we have any interrupt from the DMAC which is not in use */
498 if (!dw->in_use)
499 return IRQ_NONE;
500
501 status = dma_readl(dw, STATUS_INT);
502 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
503
504 /* Check if we have any interrupt from the DMAC */
505 if (!status)
506 return IRQ_NONE;
507
508 /*
509 * Just disable the interrupts. We'll turn them back on in the
510 * softirq handler.
511 */
512 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
513 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
514 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
515
516 status = dma_readl(dw, STATUS_INT);
517 if (status) {
518 dev_err(dw->dma.dev,
519 "BUG: Unexpected interrupts pending: 0x%x\n",
520 status);
521
522 /* Try to recover */
523 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
524 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
525 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
526 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
527 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
528 }
529
530 tasklet_schedule(&dw->tasklet);
531
532 return IRQ_HANDLED;
533}
534
535/*----------------------------------------------------------------------*/
536
537static struct dma_async_tx_descriptor *
538dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
539 size_t len, unsigned long flags)
540{
541 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
542 struct dw_dma *dw = to_dw_dma(chan->device);
543 struct dw_desc *desc;
544 struct dw_desc *first;
545 struct dw_desc *prev;
546 size_t xfer_count;
547 size_t offset;
548 u8 m_master = dwc->dws.m_master;
549 unsigned int src_width;
550 unsigned int dst_width;
551 unsigned int data_width = dw->pdata->data_width[m_master];
552 u32 ctllo, ctlhi;
553 u8 lms = DWC_LLP_LMS(m_master);
554
555 dev_vdbg(chan2dev(chan),
556 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
557 &dest, &src, len, flags);
558
559 if (unlikely(!len)) {
560 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
561 return NULL;
562 }
563
564 dwc->direction = DMA_MEM_TO_MEM;
565
566 src_width = dst_width = __ffs(data_width | src | dest | len);
567
568 ctllo = dw->prepare_ctllo(dwc)
569 | DWC_CTLL_DST_WIDTH(dst_width)
570 | DWC_CTLL_SRC_WIDTH(src_width)
571 | DWC_CTLL_DST_INC
572 | DWC_CTLL_SRC_INC
573 | DWC_CTLL_FC_M2M;
574 prev = first = NULL;
575
576 for (offset = 0; offset < len; offset += xfer_count) {
577 desc = dwc_desc_get(dwc);
578 if (!desc)
579 goto err_desc_get;
580
581 ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);
582
583 lli_write(desc, sar, src + offset);
584 lli_write(desc, dar, dest + offset);
585 lli_write(desc, ctllo, ctllo);
586 lli_write(desc, ctlhi, ctlhi);
587 desc->len = xfer_count;
588
589 if (!first) {
590 first = desc;
591 } else {
592 lli_write(prev, llp, desc->txd.phys | lms);
593 list_add_tail(&desc->desc_node, &first->tx_list);
594 }
595 prev = desc;
596 }
597
598 if (flags & DMA_PREP_INTERRUPT)
599 /* Trigger interrupt after last block */
600 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
601
602 prev->lli.llp = 0;
603 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
604 first->txd.flags = flags;
605 first->total_len = len;
606
607 return &first->txd;
608
609err_desc_get:
610 dwc_desc_put(dwc, first);
611 return NULL;
612}
613
614static struct dma_async_tx_descriptor *
615dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
616 unsigned int sg_len, enum dma_transfer_direction direction,
617 unsigned long flags, void *context)
618{
619 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
620 struct dw_dma *dw = to_dw_dma(chan->device);
621 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
622 struct dw_desc *prev;
623 struct dw_desc *first;
624 u32 ctllo, ctlhi;
625 u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
626 dma_addr_t reg;
627 unsigned int reg_width;
628 unsigned int mem_width;
629 unsigned int i;
630 struct scatterlist *sg;
631 size_t total_len = 0;
632
633 dev_vdbg(chan2dev(chan), "%s\n", __func__);
634
635 if (unlikely(!is_slave_direction(direction) || !sg_len))
636 return NULL;
637
638 dwc->direction = direction;
639
640 prev = first = NULL;
641
642 switch (direction) {
643 case DMA_MEM_TO_DEV:
644 reg_width = __ffs(sconfig->dst_addr_width);
645 reg = sconfig->dst_addr;
646 ctllo = dw->prepare_ctllo(dwc)
647 | DWC_CTLL_DST_WIDTH(reg_width)
648 | DWC_CTLL_DST_FIX
649 | DWC_CTLL_SRC_INC;
650
651 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
652 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
653
654 for_each_sg(sgl, sg, sg_len, i) {
655 struct dw_desc *desc;
656 u32 len, mem;
657 size_t dlen;
658
659 mem = sg_dma_address(sg);
660 len = sg_dma_len(sg);
661
662 mem_width = __ffs(sconfig->src_addr_width | mem | len);
663
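			/*
			 * An SG entry may need to be split into several
			 * hardware blocks: bytes2block() returns in dlen the
			 * number of bytes covered by this descriptor, and the
			 * loop below repeats until the whole entry is mapped.
			 */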
664slave_sg_todev_fill_desc:
665 desc = dwc_desc_get(dwc);
666 if (!desc)
667 goto err_desc_get;
668
669 ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);
670
671 lli_write(desc, sar, mem);
672 lli_write(desc, dar, reg);
673 lli_write(desc, ctlhi, ctlhi);
674 lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
675 desc->len = dlen;
676
677 if (!first) {
678 first = desc;
679 } else {
680 lli_write(prev, llp, desc->txd.phys | lms);
681 list_add_tail(&desc->desc_node, &first->tx_list);
682 }
683 prev = desc;
684
685 mem += dlen;
686 len -= dlen;
687 total_len += dlen;
688
689 if (len)
690 goto slave_sg_todev_fill_desc;
691 }
692 break;
693 case DMA_DEV_TO_MEM:
694 reg_width = __ffs(sconfig->src_addr_width);
695 reg = sconfig->src_addr;
696 ctllo = dw->prepare_ctllo(dwc)
697 | DWC_CTLL_SRC_WIDTH(reg_width)
698 | DWC_CTLL_DST_INC
699 | DWC_CTLL_SRC_FIX;
700
701 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
702 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
703
704 for_each_sg(sgl, sg, sg_len, i) {
705 struct dw_desc *desc;
706 u32 len, mem;
707 size_t dlen;
708
709 mem = sg_dma_address(sg);
710 len = sg_dma_len(sg);
711
712slave_sg_fromdev_fill_desc:
713 desc = dwc_desc_get(dwc);
714 if (!desc)
715 goto err_desc_get;
716
717 ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);
718
719 lli_write(desc, sar, reg);
720 lli_write(desc, dar, mem);
721 lli_write(desc, ctlhi, ctlhi);
722 mem_width = __ffs(sconfig->dst_addr_width | mem | dlen);
723 lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
724 desc->len = dlen;
725
726 if (!first) {
727 first = desc;
728 } else {
729 lli_write(prev, llp, desc->txd.phys | lms);
730 list_add_tail(&desc->desc_node, &first->tx_list);
731 }
732 prev = desc;
733
734 mem += dlen;
735 len -= dlen;
736 total_len += dlen;
737
738 if (len)
739 goto slave_sg_fromdev_fill_desc;
740 }
741 break;
742 default:
743 return NULL;
744 }
745
746 if (flags & DMA_PREP_INTERRUPT)
747 /* Trigger interrupt after last block */
748 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
749
750 prev->lli.llp = 0;
751 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
752 first->total_len = total_len;
753
754 return &first->txd;
755
756err_desc_get:
757 dev_err(chan2dev(chan),
758 "not enough descriptors available. Direction %d\n", direction);
759 dwc_desc_put(dwc, first);
760 return NULL;
761}
762
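/*
 * Filter callback for dma_request_channel(). A minimal usage sketch from a
 * client driver might look like this (variable names are illustrative):
 *
 *	struct dw_dma_slave slave = { .dma_dev = dmac_dev };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &slave);
 */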
763bool dw_dma_filter(struct dma_chan *chan, void *param)
764{
765 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
766 struct dw_dma_slave *dws = param;
767
768 if (dws->dma_dev != chan->device->dev)
769 return false;
770
771 /* Permit only the channels allowed by the channel mask, if one is set */
772 if (dws->channels && !(dws->channels & dwc->mask))
773 return false;
774
775 /* We have to copy data since dws can be temporary storage */
776 memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
777
778 return true;
779}
780EXPORT_SYMBOL_GPL(dw_dma_filter);
781
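/*
 * Clamp the requested bursts into the [1, max_burst] range and round them
 * down to a power of two. For example, with max_burst == 16 a requested
 * maxburst of 24 becomes 16 and a requested maxburst of 5 becomes 4.
 */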
782static int dwc_verify_maxburst(struct dma_chan *chan)
783{
784 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
785
786 dwc->dma_sconfig.src_maxburst =
787 clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
788 dwc->dma_sconfig.dst_maxburst =
789 clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);
790
791 dwc->dma_sconfig.src_maxburst =
792 rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
793 dwc->dma_sconfig.dst_maxburst =
794 rounddown_pow_of_two(dwc->dma_sconfig.dst_maxburst);
795
796 return 0;
797}
798
799static int dwc_verify_p_buswidth(struct dma_chan *chan)
800{
801 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
802 struct dw_dma *dw = to_dw_dma(chan->device);
803 u32 reg_width, max_width;
804
805 if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
806 reg_width = dwc->dma_sconfig.dst_addr_width;
807 else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
808 reg_width = dwc->dma_sconfig.src_addr_width;
809 else /* DMA_MEM_TO_MEM */
810 return 0;
811
812 max_width = dw->pdata->data_width[dwc->dws.p_master];
813
814 /* Fall back to 1-byte transfer width if undefined */
815 if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
816 reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
817 else if (!is_power_of_2(reg_width) || reg_width > max_width)
818 return -EINVAL;
819 else /* bus width is valid */
820 return 0;
821
822 /* Update undefined addr width value */
823 if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
824 dwc->dma_sconfig.dst_addr_width = reg_width;
825 else /* DMA_DEV_TO_MEM */
826 dwc->dma_sconfig.src_addr_width = reg_width;
827
828 return 0;
829}
830
831static int dwc_verify_m_buswidth(struct dma_chan *chan)
832{
833 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
834 struct dw_dma *dw = to_dw_dma(chan->device);
835 u32 reg_width, reg_burst, mem_width;
836
837 mem_width = dw->pdata->data_width[dwc->dws.m_master];
838
839 /*
840 * Data may be left locked in the DMA FIFO when a channel is
841 * suspended; disabling the channel afterwards causes that data to be
842 * silently lost. To prevent this, keep the source and destination
843 * transfer widths coherent by means of the relation:
844 * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH)
845 * See the commit message that introduced this change for details.
846 *
847 * Note that the DMA configs used in the calculations below must
848 * already have been verified by the time this function is called.
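	 *
	 * For instance (numbers are illustrative): a DMA_DEV_TO_MEM transfer
	 * with src_addr_width = 2 bytes, src_maxburst = 4 and a 4-byte memory
	 * master ends up with dst_addr_width = min(4, 2 * 4) = 4 bytes.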
849 */
850 if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
851 reg_width = dwc->dma_sconfig.dst_addr_width;
852 if (mem_width < reg_width)
853 return -EINVAL;
854
855 dwc->dma_sconfig.src_addr_width = mem_width;
856 } else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
857 reg_width = dwc->dma_sconfig.src_addr_width;
858 reg_burst = dwc->dma_sconfig.src_maxburst;
859
860 dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
861 }
862
863 return 0;
864}
865
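/*
 * Cache the slave configuration for the channel and sanity-check it against
 * the hardware capabilities (maximum burst, peripheral and memory bus widths).
 */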
866static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
867{
868 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
869 int ret;
870
871 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
872
873 ret = dwc_verify_maxburst(chan);
874 if (ret)
875 return ret;
876
877 ret = dwc_verify_p_buswidth(chan);
878 if (ret)
879 return ret;
880
881 ret = dwc_verify_m_buswidth(chan);
882 if (ret)
883 return ret;
884
885 return 0;
886}
887
888static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
889{
890 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
891 unsigned int count = 20; /* timeout iterations */
892
893 dw->suspend_chan(dwc, drain);
894
895 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
896 udelay(2);
897
898 set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
899}
900
901static int dwc_pause(struct dma_chan *chan)
902{
903 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
904 unsigned long flags;
905
906 spin_lock_irqsave(&dwc->lock, flags);
907 dwc_chan_pause(dwc, false);
908 spin_unlock_irqrestore(&dwc->lock, flags);
909
910 return 0;
911}
912
913static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
914{
915 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
916
917 dw->resume_chan(dwc, drain);
918
919 clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
920}
921
922static int dwc_resume(struct dma_chan *chan)
923{
924 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
925 unsigned long flags;
926
927 spin_lock_irqsave(&dwc->lock, flags);
928
929 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
930 dwc_chan_resume(dwc, false);
931
932 spin_unlock_irqrestore(&dwc->lock, flags);
933
934 return 0;
935}
936
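/*
 * Abort all transfers on the channel: pause with FIFO drain, disable the
 * channel, then complete every active and queued descriptor without invoking
 * their callbacks.
 */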
937static int dwc_terminate_all(struct dma_chan *chan)
938{
939 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
940 struct dw_dma *dw = to_dw_dma(chan->device);
941 struct dw_desc *desc, *_desc;
942 unsigned long flags;
943 LIST_HEAD(list);
944
945 spin_lock_irqsave(&dwc->lock, flags);
946
947 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
948
949 dwc_chan_pause(dwc, true);
950
951 dwc_chan_disable(dw, dwc);
952
953 dwc_chan_resume(dwc, true);
954
955 /* active_list entries will end up before queued entries */
956 list_splice_init(&dwc->queue, &list);
957 list_splice_init(&dwc->active_list, &list);
958
959 spin_unlock_irqrestore(&dwc->lock, flags);
960
961 /* Flush all pending and queued descriptors */
962 list_for_each_entry_safe(desc, _desc, &list, desc_node)
963 dwc_descriptor_complete(dwc, desc, false);
964
965 return 0;
966}
967
968static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
969{
970 struct dw_desc *desc;
971
972 list_for_each_entry(desc, &dwc->active_list, desc_node)
973 if (desc->txd.cookie == c)
974 return desc;
975
976 return NULL;
977}
978
979static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie,
980 enum dma_status *status)
981{
982 struct dw_desc *desc;
983 unsigned long flags;
984 u32 residue;
985
986 spin_lock_irqsave(&dwc->lock, flags);
987
988 desc = dwc_find_desc(dwc, cookie);
989 if (desc) {
990 if (desc == dwc_first_active(dwc)) {
991 residue = desc->residue;
992 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
993 residue -= dwc_get_sent(dwc);
994 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
995 *status = DMA_PAUSED;
996 } else {
997 residue = desc->total_len;
998 }
999 } else {
1000 residue = 0;
1001 }
1002
1003 spin_unlock_irqrestore(&dwc->lock, flags);
1004 return residue;
1005}
1006
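/*
 * Report the status of a previously submitted cookie. The descriptor list is
 * re-scanned first so that blocks completed since the last tasklet run are
 * accounted for before the residue is reported.
 */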
1007static enum dma_status
1008dwc_tx_status(struct dma_chan *chan,
1009 dma_cookie_t cookie,
1010 struct dma_tx_state *txstate)
1011{
1012 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1013 enum dma_status ret;
1014
1015 ret = dma_cookie_status(chan, cookie, txstate);
1016 if (ret == DMA_COMPLETE)
1017 return ret;
1018
1019 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1020
1021 ret = dma_cookie_status(chan, cookie, txstate);
1022 if (ret == DMA_COMPLETE)
1023 return ret;
1024
1025 dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret));
1026 return ret;
1027}
1028
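/*
 * Kick off the first queued descriptor if the channel is currently idle;
 * descriptors queued behind an in-flight transfer are started later from the
 * completion path.
 */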
1029static void dwc_issue_pending(struct dma_chan *chan)
1030{
1031 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1032 unsigned long flags;
1033
1034 spin_lock_irqsave(&dwc->lock, flags);
1035 if (list_empty(&dwc->active_list))
1036 dwc_dostart_first_queued(dwc);
1037 spin_unlock_irqrestore(&dwc->lock, flags);
1038}
1039
1040/*----------------------------------------------------------------------*/
1041
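/*
 * Globally disable the controller: mask the per-channel interrupt sources and
 * spin until the hardware reports DMA as disabled.
 */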
1042void do_dw_dma_off(struct dw_dma *dw)
1043{
1044 dma_writel(dw, CFG, 0);
1045
1046 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1047 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1048 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1049 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1050 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1051
1052 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1053 cpu_relax();
1054}
1055
1056void do_dw_dma_on(struct dw_dma *dw)
1057{
1058 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1059}
1060
1061static int dwc_alloc_chan_resources(struct dma_chan *chan)
1062{
1063 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1064 struct dw_dma *dw = to_dw_dma(chan->device);
1065
1066 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1067
1068 /* ASSERT: channel is idle */
1069 if (dma_readl(dw, CH_EN) & dwc->mask) {
1070 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1071 return -EIO;
1072 }
1073
1074 dma_cookie_init(chan);
1075
1076 /*
1077 * NOTE: some controllers may have additional features that we
1078 * need to initialize here, like "scatter-gather" (which
1079 * doesn't mean what you think it means), and status writeback.
1080 */
1081
1082 /*
1083 * We need controller-specific data to set up slave transfers.
1084 */
1085 if (chan->private && !dw_dma_filter(chan, chan->private)) {
1086 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1087 return -EINVAL;
1088 }
1089
1090 /* Enable controller here if needed */
1091 if (!dw->in_use)
1092 do_dw_dma_on(dw);
1093 dw->in_use |= dwc->mask;
1094
1095 return 0;
1096}
1097
1098static void dwc_free_chan_resources(struct dma_chan *chan)
1099{
1100 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1101 struct dw_dma *dw = to_dw_dma(chan->device);
1102 unsigned long flags;
1103
1104 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1105 dwc->descs_allocated);
1106
1107 /* ASSERT: channel is idle */
1108 BUG_ON(!list_empty(&dwc->active_list));
1109 BUG_ON(!list_empty(&dwc->queue));
1110 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1111
1112 spin_lock_irqsave(&dwc->lock, flags);
1113
1114 /* Clear custom channel configuration */
1115 memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
1116
1117 /* Disable interrupts */
1118 channel_clear_bit(dw, MASK.XFER, dwc->mask);
1119 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1120 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1121
1122 spin_unlock_irqrestore(&dwc->lock, flags);
1123
1124 /* Disable the controller if this was the last user */
1125 dw->in_use &= ~dwc->mask;
1126 if (!dw->in_use)
1127 do_dw_dma_off(dw);
1128
1129 dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1130}
1131
1132static void dwc_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
1133{
1134 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1135
1136 caps->max_burst = dwc->max_burst;
1137
1138 /*
1139 * Some devices rely on hardware-accelerated multi-block transfers,
1140 * a.k.a. LLPs in DW DMAC notation. If LLPs are supported, max_sg_burst
1141 * is set to zero, which means an unlimited number of SG entries can be
1142 * handled in a single DMA transaction; otherwise only one SG entry is
1143 * supported.
1144 */
1145 if (dwc->nollp)
1146 caps->max_sg_burst = 1;
1147 else
1148 caps->max_sg_burst = 0;
1149}
1150
1151int do_dma_probe(struct dw_dma_chip *chip)
1152{
1153 struct dw_dma *dw = chip->dw;
1154 struct dw_dma_platform_data *pdata;
1155 bool autocfg = false;
1156 unsigned int dw_params;
1157 unsigned int i;
1158 int ret;
1159
1160 dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
1161 if (!dw->pdata)
1162 return -ENOMEM;
1163
1164 dw->regs = chip->regs;
1165
1166 pm_runtime_get_sync(chip->dev);
1167
1168 if (!chip->pdata) {
1169 dw_params = dma_readl(dw, DW_PARAMS);
1170 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1171
1172 autocfg = dw_params >> DW_PARAMS_EN & 1;
1173 if (!autocfg) {
1174 ret = -EINVAL;
1175 goto err_pdata;
1176 }
1177
1178 /* Reassign the platform data pointer */
1179 pdata = dw->pdata;
1180
1181 /* Get hardware configuration parameters */
1182 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1183 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
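		/*
		 * 4 << the 2-bit encoded value gives the master data bus
		 * width in bytes: 4, 8, 16 or 32 (i.e. 32 to 256 bits).
		 */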
1184 for (i = 0; i < pdata->nr_masters; i++) {
1185 pdata->data_width[i] =
1186 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
1187 }
1188 pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
1189
1190 /* Fill platform data with the default values */
1191 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1192 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1193 } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
1194 ret = -EINVAL;
1195 goto err_pdata;
1196 } else {
1197 memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
1198
1199 /* Reassign the platform data pointer */
1200 pdata = dw->pdata;
1201 }
1202
1203 dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
1204 GFP_KERNEL);
1205 if (!dw->chan) {
1206 ret = -ENOMEM;
1207 goto err_pdata;
1208 }
1209
1210 /* Calculate all channel mask before DMA setup */
1211 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1212
1213 /* Force DMA off, just in case */
1214 dw->disable(dw);
1215
1216 /* Device and instance ID for IRQ and DMA pool */
1217 dw->set_device_name(dw, chip->id);
1218
1219 /* Create a pool of consistent memory blocks for hardware descriptors */
1220 dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
1221 sizeof(struct dw_desc), 4, 0);
1222 if (!dw->desc_pool) {
1223 dev_err(chip->dev, "No memory for descriptors dma pool\n");
1224 ret = -ENOMEM;
1225 goto err_pdata;
1226 }
1227
1228 tasklet_setup(&dw->tasklet, dw_dma_tasklet);
1229
1230 ret = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1231 dw->name, dw);
1232 if (ret)
1233 goto err_pdata;
1234
1235 INIT_LIST_HEAD(&dw->dma.channels);
1236 for (i = 0; i < pdata->nr_channels; i++) {
1237 struct dw_dma_chan *dwc = &dw->chan[i];
1238
1239 dwc->chan.device = &dw->dma;
1240 dma_cookie_init(&dwc->chan);
1241 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1242 list_add_tail(&dwc->chan.device_node,
1243 &dw->dma.channels);
1244 else
1245 list_add(&dwc->chan.device_node, &dw->dma.channels);
1246
1247 /* 7 is highest priority & 0 is lowest. */
1248 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1249 dwc->priority = pdata->nr_channels - i - 1;
1250 else
1251 dwc->priority = i;
1252
1253 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1254 spin_lock_init(&dwc->lock);
1255 dwc->mask = 1 << i;
1256
1257 INIT_LIST_HEAD(&dwc->active_list);
1258 INIT_LIST_HEAD(&dwc->queue);
1259
1260 channel_clear_bit(dw, CH_EN, dwc->mask);
1261
1262 dwc->direction = DMA_TRANS_NONE;
1263
1264 /* Hardware configuration */
1265 if (autocfg) {
1266 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
1267 void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
1268 unsigned int dwc_params = readl(addr);
1269
1270 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1271 dwc_params);
1272
1273 /*
1274 * Decode the maximum block size for the given channel. The
1275 * stored 4-bit value encodes block sizes from 3 (0x0) up
1276 * to 4095 (0xa).
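			 * In other words, block_size = (4 << value) - 1.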
1277 */
1278 dwc->block_size =
1279 (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
1280
1281 /*
1282 * According to the DW DMA databook, true scatter-
1283 * gather LLPs aren't available if either the multi-block
1284 * config is disabled (CHx_MULTI_BLK_EN == 0) or the
1285 * LLP register is hard-coded to zeros
1286 * (CHx_HC_LLP == 1).
1287 */
1288 dwc->nollp =
1289 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 ||
1290 (dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1;
1291 dwc->max_burst =
1292 (0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7));
1293 } else {
1294 dwc->block_size = pdata->block_size;
1295 dwc->nollp = !pdata->multi_block[i];
1296 dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST;
1297 }
1298 }
1299
1300 /* Clear all interrupts on all channels. */
1301 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1302 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1303 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1304 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1305 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1306
1307 /* Set capabilities */
1308 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1309 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1310 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1311
1312 dw->dma.dev = chip->dev;
1313 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1314 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1315
1316 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1317 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1318
1319 dw->dma.device_caps = dwc_caps;
1320 dw->dma.device_config = dwc_config;
1321 dw->dma.device_pause = dwc_pause;
1322 dw->dma.device_resume = dwc_resume;
1323 dw->dma.device_terminate_all = dwc_terminate_all;
1324
1325 dw->dma.device_tx_status = dwc_tx_status;
1326 dw->dma.device_issue_pending = dwc_issue_pending;
1327
1328 /* DMA capabilities */
1329 dw->dma.min_burst = DW_DMA_MIN_BURST;
1330 dw->dma.max_burst = DW_DMA_MAX_BURST;
1331 dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1332 dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1333 dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1334 BIT(DMA_MEM_TO_MEM);
1335 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1336
1337 /*
1338 * For now there is no hardware with a non-uniform maximum block size
1339 * across all of the device channels, so we set the maximum segment
1340 * size to the block size found for the very first channel.
1341 */
1342 dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size);
1343
1344 ret = dma_async_device_register(&dw->dma);
1345 if (ret)
1346 goto err_dma_register;
1347
1348 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
1349 pdata->nr_channels);
1350
1351 pm_runtime_put_sync_suspend(chip->dev);
1352
1353 return 0;
1354
1355err_dma_register:
1356 free_irq(chip->irq, dw);
1357err_pdata:
1358 pm_runtime_put_sync_suspend(chip->dev);
1359 return ret;
1360}
1361
1362int do_dma_remove(struct dw_dma_chip *chip)
1363{
1364 struct dw_dma *dw = chip->dw;
1365 struct dw_dma_chan *dwc, *_dwc;
1366
1367 pm_runtime_get_sync(chip->dev);
1368
1369 do_dw_dma_off(dw);
1370 dma_async_device_unregister(&dw->dma);
1371
1372 free_irq(chip->irq, dw);
1373 tasklet_kill(&dw->tasklet);
1374
1375 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1376 chan.device_node) {
1377 list_del(&dwc->chan.device_node);
1378 channel_clear_bit(dw, CH_EN, dwc->mask);
1379 }
1380
1381 pm_runtime_put_sync_suspend(chip->dev);
1382 return 0;
1383}
1384
1385int do_dw_dma_disable(struct dw_dma_chip *chip)
1386{
1387 struct dw_dma *dw = chip->dw;
1388
1389 dw->disable(dw);
1390 return 0;
1391}
1392EXPORT_SYMBOL_GPL(do_dw_dma_disable);
1393
1394int do_dw_dma_enable(struct dw_dma_chip *chip)
1395{
1396 struct dw_dma *dw = chip->dw;
1397
1398 dw->enable(dw);
1399 return 0;
1400}
1401EXPORT_SYMBOL_GPL(do_dw_dma_enable);
1402
1403MODULE_LICENSE("GPL v2");
1404MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1405MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1406MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");