// SPDX-License-Identifier: GPL-2.0-only
/*
 * IMG Multi-threaded DMA Controller (MDC)
 *
 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MDC_MAX_DMA_CHANNELS 32

#define MDC_GENERAL_CONFIG 0x000
#define MDC_GENERAL_CONFIG_LIST_IEN BIT(31)
#define MDC_GENERAL_CONFIG_IEN BIT(29)
#define MDC_GENERAL_CONFIG_LEVEL_INT BIT(28)
#define MDC_GENERAL_CONFIG_INC_W BIT(12)
#define MDC_GENERAL_CONFIG_INC_R BIT(8)
#define MDC_GENERAL_CONFIG_PHYSICAL_W BIT(7)
#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT 4
#define MDC_GENERAL_CONFIG_WIDTH_W_MASK 0x7
#define MDC_GENERAL_CONFIG_PHYSICAL_R BIT(3)
#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT 0
#define MDC_GENERAL_CONFIG_WIDTH_R_MASK 0x7

#define MDC_READ_PORT_CONFIG 0x004
#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT 28
#define MDC_READ_PORT_CONFIG_STHREAD_MASK 0xf
#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT 24
#define MDC_READ_PORT_CONFIG_RTHREAD_MASK 0xf
#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT 16
#define MDC_READ_PORT_CONFIG_WTHREAD_MASK 0xf
#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT 4
#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK 0xff
#define MDC_READ_PORT_CONFIG_DREQ_ENABLE BIT(1)

#define MDC_READ_ADDRESS 0x008

#define MDC_WRITE_ADDRESS 0x00c

#define MDC_TRANSFER_SIZE 0x010
#define MDC_TRANSFER_SIZE_MASK 0xffffff

#define MDC_LIST_NODE_ADDRESS 0x014

#define MDC_CMDS_PROCESSED 0x018
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT 16
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK 0x3f
#define MDC_CMDS_PROCESSED_INT_ACTIVE BIT(8)
#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT 0
#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK 0x3f

#define MDC_CONTROL_AND_STATUS 0x01c
#define MDC_CONTROL_AND_STATUS_CANCEL BIT(20)
#define MDC_CONTROL_AND_STATUS_LIST_EN BIT(4)
#define MDC_CONTROL_AND_STATUS_EN BIT(0)

#define MDC_ACTIVE_TRANSFER_SIZE 0x030

#define MDC_GLOBAL_CONFIG_A 0x900
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT 16
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK 0xff
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT 8
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK 0xff
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT 0
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK 0xff

struct mdc_hw_list_desc {
	u32 gen_conf;
	u32 readport_conf;
	u32 read_addr;
	u32 write_addr;
	u32 xfer_size;
	u32 node_addr;
	u32 cmds_done;
	u32 ctrl_status;
	/*
	 * Not part of the list descriptor, but instead used by the CPU to
	 * traverse the list.
	 */
	struct mdc_hw_list_desc *next_desc;
};

struct mdc_tx_desc {
	struct mdc_chan *chan;
	struct virt_dma_desc vd;
	dma_addr_t list_phys;
	struct mdc_hw_list_desc *list;
	bool cyclic;
	bool cmd_loaded;
	unsigned int list_len;
	unsigned int list_period_len;
	size_t list_xfer_size;
	unsigned int list_cmds_done;
};

struct mdc_chan {
	struct mdc_dma *mdma;
	struct virt_dma_chan vc;
	struct dma_slave_config config;
	struct mdc_tx_desc *desc;
	int irq;
	unsigned int periph;
	unsigned int thread;
	unsigned int chan_nr;
};

struct mdc_dma_soc_data {
	void (*enable_chan)(struct mdc_chan *mchan);
	void (*disable_chan)(struct mdc_chan *mchan);
};

struct mdc_dma {
	struct dma_device dma_dev;
	void __iomem *regs;
	struct clk *clk;
	struct dma_pool *desc_pool;
	struct regmap *periph_regs;
	spinlock_t lock;
	unsigned int nr_threads;
	unsigned int nr_channels;
	unsigned int bus_width;
	unsigned int max_burst_mult;
	unsigned int max_xfer_size;
	const struct mdc_dma_soc_data *soc;
	struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
};

static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
	return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
	writel(val, mdma->regs + reg);
}

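/*
 * Each DMA channel owns a 0x040-byte window of registers within the
 * controller's register block; chan_nr selects the window.
 */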
static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
	return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
	mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
	return container_of(to_virt_chan(c), struct mdc_chan, vc);
}

static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
{
	struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);

	return container_of(vdesc, struct mdc_tx_desc, vd);
}

static inline struct device *mdma2dev(struct mdc_dma *mdma)
{
	return mdma->dma_dev.dev;
}

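/*
 * The hardware encodes an access width as log2 of the width in bytes:
 * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2, 8 bytes -> 3.
 */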
static inline unsigned int to_mdc_width(unsigned int bytes)
{
	return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
				      unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
				       unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}

static void mdc_list_desc_config(struct mdc_chan *mchan,
				 struct mdc_hw_list_desc *ldesc,
				 enum dma_transfer_direction dir,
				 dma_addr_t src, dma_addr_t dst, size_t len)
{
	struct mdc_dma *mdma = mchan->mdma;
	unsigned int max_burst, burst_size;

	ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	ldesc->readport_conf =
		(mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	ldesc->read_addr = src;
	ldesc->write_addr = dst;
	ldesc->xfer_size = len - 1;
	ldesc->node_addr = 0;
	ldesc->cmds_done = 0;
	ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
		MDC_CONTROL_AND_STATUS_EN;
	ldesc->next_desc = NULL;

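	/*
	 * Transfers with unaligned endpoints get one bus-width multiple
	 * less burst headroom than fully aligned ones.
	 */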
	if (IS_ALIGNED(dst, mdma->bus_width) &&
	    IS_ALIGNED(src, mdma->bus_width))
		max_burst = mdma->bus_width * mdma->max_burst_mult;
	else
		max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

	if (dir == DMA_MEM_TO_DEV) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
		burst_size = min(max_burst, mchan->config.dst_maxburst *
				 mchan->config.dst_addr_width);
	} else if (dir == DMA_DEV_TO_MEM) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mchan->config.src_addr_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = min(max_burst, mchan->config.src_maxburst *
				 mchan->config.src_addr_width);
	} else {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
			MDC_GENERAL_CONFIG_INC_W;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = max_burst;
	}
	ldesc->readport_conf |= (burst_size - 1) <<
		MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}

static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
	struct mdc_dma *mdma = mdesc->chan->mdma;
	struct mdc_hw_list_desc *curr, *next;
	dma_addr_t curr_phys, next_phys;

	curr = mdesc->list;
	curr_phys = mdesc->list_phys;
	while (curr) {
		next = curr->next_desc;
		next_phys = curr->node_addr;
		dma_pool_free(mdma->desc_pool, curr, curr_phys);
		curr = next;
		curr_phys = next_phys;
	}
}

static void mdc_desc_free(struct virt_dma_desc *vd)
{
	struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

	mdc_list_desc_free(mdesc);
	kfree(mdesc);
}

static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	if (!len)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->list_xfer_size = len;

	while (len > 0) {
		size_t xfer_size;

		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
		if (!curr)
			goto free_desc;

		if (prev) {
			prev->node_addr = curr_phys;
			prev->next_desc = curr;
		} else {
			mdesc->list_phys = curr_phys;
			mdesc->list = curr;
		}

		xfer_size = min_t(size_t, mdma->max_xfer_size, len);

		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
				     xfer_size);

		prev = curr;

		mdesc->list_len++;
		src += xfer_size;
		dest += xfer_size;
		len -= xfer_size;
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static int mdc_check_slave_width(struct mdc_chan *mchan,
				 enum dma_transfer_direction dir)
{
	enum dma_slave_buswidth width;

	if (dir == DMA_MEM_TO_DEV)
		width = mchan->config.dst_addr_width;
	else
		width = mchan->config.src_addr_width;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		break;
	default:
		return -EINVAL;
	}

	if (width > mchan->mdma->bus_width)
		return -EINVAL;

	return 0;
}

static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	if (!buf_len && !period_len)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->cyclic = true;
	mdesc->list_xfer_size = buf_len;
	mdesc->list_period_len = DIV_ROUND_UP(period_len,
					      mdma->max_xfer_size);

	while (buf_len > 0) {
		size_t remainder = min(period_len, buf_len);

		while (remainder > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  remainder);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir,
						     buf_addr,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf_addr,
						     xfer_size);
			}

			prev = curr;

			mdesc->list_len++;
			buf_addr += xfer_size;
			buf_len -= xfer_size;
			remainder -= xfer_size;
		}
	}
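	/* Link the final node back to the first to make the list circular. */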
	prev->node_addr = mdesc->list_phys;

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct scatterlist *sg;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;
	unsigned int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);

		while (buf_len > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  buf_len);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir, buf,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf, xfer_size);
			}

			prev = curr;

			mdesc->list_len++;
			mdesc->list_xfer_size += xfer_size;
			buf += xfer_size;
			buf_len -= xfer_size;
		}
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static void mdc_issue_desc(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;
	struct virt_dma_desc *vd;
	struct mdc_tx_desc *mdesc;
	u32 val;

	vd = vchan_next_desc(&mchan->vc);
	if (!vd)
		return;

	list_del(&vd->node);

	mdesc = to_mdc_desc(&vd->tx);
	mchan->desc = mdesc;

	dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
		mchan->chan_nr);

	mdma->soc->enable_chan(mchan);

	val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
	val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
	val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
	mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
	val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
	val |= MDC_CONTROL_AND_STATUS_LIST_EN;
	mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}

static void mdc_issue_pending(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
		mdc_issue_desc(mchan);
	spin_unlock_irqrestore(&mchan->vc.lock, flags);
}

static enum dma_status mdc_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	size_t bytes = 0;
	int ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	vd = vchan_find_desc(&mchan->vc, cookie);
	if (vd) {
		mdesc = to_mdc_desc(&vd->tx);
		bytes = mdesc->list_xfer_size;
	} else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
		struct mdc_hw_list_desc *ldesc;
		u32 val1, val2, done, processed, residue;
		int i, cmds;

		mdesc = mchan->desc;

		/*
		 * Determine the number of commands that haven't been
		 * processed (handled by the IRQ handler) yet.
		 */
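		/*
		 * CMDS_PROCESSED is sampled on both sides of the residue
		 * read and the reads are retried until both samples match,
		 * so the residue and command counts form a consistent
		 * snapshot.
		 */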
		do {
			val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
			residue = mdc_chan_readl(mchan,
						 MDC_ACTIVE_TRANSFER_SIZE);
			val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
		} while (val1 != val2);

		done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
		processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
		cmds = (done - processed) %
			(MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

		/*
		 * If the command loaded event hasn't been processed yet, then
		 * the difference above includes an extra command.
		 */
		if (!mdesc->cmd_loaded)
			cmds--;
		else
			cmds += mdesc->list_cmds_done;

		bytes = mdesc->list_xfer_size;
		ldesc = mdesc->list;
		for (i = 0; i < cmds; i++) {
			bytes -= ldesc->xfer_size + 1;
			ldesc = ldesc->next_desc;
		}
		if (ldesc) {
			if (residue != MDC_TRANSFER_SIZE_MASK)
				bytes -= ldesc->xfer_size - residue;
			else
				bytes -= ldesc->xfer_size + 1;
		}
	}
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	dma_set_residue(txstate, bytes);

	return ret;
}

static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
{
	u32 val, processed, done1, done2;
	unsigned int ret;

	val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
	processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
		MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
	/*
	 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
	 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
	 * didn't miss a command completion.
	 */
	do {
		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

		done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;

		val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
			  MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
			 MDC_CMDS_PROCESSED_INT_ACTIVE);

		val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;

		mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);

		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

		done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
	} while (done1 != done2);

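	/*
	 * The counters are 6 bits wide and wrap at 64, e.g. processed = 62
	 * and done1 = 1 means (64 - 62) + 1 = 3 new events.
	 */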
	if (done1 >= processed)
		ret = done1 - processed;
	else
		ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) -
			processed) + done1;

	return ret;
}

static int mdc_terminate_all(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&mchan->vc.lock, flags);

	mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
			MDC_CONTROL_AND_STATUS);

	if (mchan->desc) {
		vchan_terminate_vdesc(&mchan->desc->vd);
		mchan->desc = NULL;
	}
	vchan_get_all_descriptors(&mchan->vc, &head);

	mdc_get_new_events(mchan);

	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	vchan_dma_desc_free_list(&mchan->vc, &head);

	return 0;
}

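/*
 * Pairs with mdc_terminate_all(): waits for any in-flight virt-dma
 * callbacks to finish and completes the deferred freeing of the
 * descriptor handed to vchan_terminate_vdesc().
 */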
static void mdc_synchronize(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);

	vchan_synchronize(&mchan->vc);
}

static int mdc_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	mchan->config = *config;
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	return 0;
}

static int mdc_alloc_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct device *dev = mdma2dev(mchan->mdma);

	return pm_runtime_get_sync(dev);
}

static void mdc_free_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct device *dev = mdma2dev(mdma);

	mdc_terminate_all(chan);
	mdma->soc->disable_chan(mchan);
	pm_runtime_put(dev);
}

static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
	struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
	struct mdc_tx_desc *mdesc;
	unsigned int i, new_events;

	spin_lock(&mchan->vc.lock);

	dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

	new_events = mdc_get_new_events(mchan);

	if (!new_events)
		goto out;

	mdesc = mchan->desc;
	if (!mdesc) {
		dev_warn(mdma2dev(mchan->mdma),
			 "IRQ with no active descriptor on channel %d\n",
			 mchan->chan_nr);
		goto out;
	}

	for (i = 0; i < new_events; i++) {
		/*
		 * The first interrupt in a transfer indicates that the
		 * command list has been loaded, not that a command has
		 * been completed.
		 */
		if (!mdesc->cmd_loaded) {
			mdesc->cmd_loaded = true;
			continue;
		}

		mdesc->list_cmds_done++;
		if (mdesc->cyclic) {
			mdesc->list_cmds_done %= mdesc->list_len;
			if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
				vchan_cyclic_callback(&mdesc->vd);
		} else if (mdesc->list_cmds_done == mdesc->list_len) {
			mchan->desc = NULL;
			vchan_cookie_complete(&mdesc->vd);
			mdc_issue_desc(mchan);
			break;
		}
	}
out:
	spin_unlock(&mchan->vc.lock);

	return IRQ_HANDLED;
}

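/*
 * The three-cell DT specifier decoded here is: args[0] = peripheral
 * (DMA request) number, args[1] = bitmask of channels usable for this
 * peripheral, args[2] = thread to issue the transfer on.
 */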
static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	struct mdc_dma *mdma = ofdma->of_dma_data;
	struct dma_chan *chan;

	if (dma_spec->args_count != 3)
		return NULL;

	list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
		struct mdc_chan *mchan = to_mdc_chan(chan);

		if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
			continue;
		if (dma_get_slave_channel(chan)) {
			mchan->periph = dma_spec->args[0];
			mchan->thread = dma_spec->args[2];
			return chan;
		}
	}

	return NULL;
}

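/*
 * Each 32-bit CR_PERIPH_DMA_ROUTE register packs the routing for four
 * channels, one byte per channel, of which the low 6 bits select the
 * peripheral.
 */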
#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch) (0x120 + 0x4 * ((ch) / 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch) (8 * ((ch) % 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK 0x3f

static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   mchan->periph <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   0);
}

static const struct mdc_dma_soc_data pistachio_mdc_data = {
	.enable_chan = pistachio_mdc_enable_chan,
	.disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
	{ .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
	{ },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);

static int img_mdc_runtime_suspend(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);

	clk_disable_unprepare(mdma->clk);

	return 0;
}

static int img_mdc_runtime_resume(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);

	return clk_prepare_enable(mdma->clk);
}

static int mdc_dma_probe(struct platform_device *pdev)
{
	struct mdc_dma *mdma;
	struct resource *res;
	unsigned int i;
	u32 val;
	int ret;

	mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
	if (!mdma)
		return -ENOMEM;
	platform_set_drvdata(pdev, mdma);

	mdma->soc = of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mdma->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdma->regs))
		return PTR_ERR(mdma->regs);

	mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "img,cr-periph");
	if (IS_ERR(mdma->periph_regs))
		return PTR_ERR(mdma->periph_regs);

	mdma->clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(mdma->clk))
		return PTR_ERR(mdma->clk);

	dma_cap_zero(mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);

	val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
	mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
		MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
	mdma->nr_threads =
		1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
		      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
	mdma->bus_width =
		(1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
		       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
	/*
	 * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
	 * are supported, this makes it possible for the value reported in
	 * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
	 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
	 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
	 * ambiguity, restrict transfer sizes to one bus-width less than the
	 * actual maximum.
	 */
	mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;

	of_property_read_u32(pdev->dev.of_node, "dma-channels",
			     &mdma->nr_channels);
	ret = of_property_read_u32(pdev->dev.of_node,
				   "img,max-burst-multiplier",
				   &mdma->max_burst_mult);
	if (ret)
		return ret;

	mdma->dma_dev.dev = &pdev->dev;
	mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
	mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
	mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
	mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
	mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
	mdma->dma_dev.device_tx_status = mdc_tx_status;
	mdma->dma_dev.device_issue_pending = mdc_issue_pending;
	mdma->dma_dev.device_terminate_all = mdc_terminate_all;
	mdma->dma_dev.device_synchronize = mdc_synchronize;
	mdma->dma_dev.device_config = mdc_slave_config;

	mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
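	/* Advertise every power-of-two slave width up to the native bus width. */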
	for (i = 1; i <= mdma->bus_width; i <<= 1) {
		mdma->dma_dev.src_addr_widths |= BIT(i);
		mdma->dma_dev.dst_addr_widths |= BIT(i);
	}

	INIT_LIST_HEAD(&mdma->dma_dev.channels);
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		mchan->mdma = mdma;
		mchan->chan_nr = i;
		mchan->irq = platform_get_irq(pdev, i);
		if (mchan->irq < 0)
			return mchan->irq;

		ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
				       IRQ_TYPE_LEVEL_HIGH,
				       dev_name(&pdev->dev), mchan);
		if (ret < 0)
			return ret;

		mchan->vc.desc_free = mdc_desc_free;
		vchan_init(&mchan->vc, &mdma->dma_dev);
	}

	mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
					   sizeof(struct mdc_hw_list_desc),
					   4, 0);
	if (!mdma->desc_pool)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = img_mdc_runtime_resume(&pdev->dev);
		if (ret)
			return ret;
	}

	ret = dma_async_device_register(&mdma->dma_dev);
	if (ret)
		goto suspend;

	ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
	if (ret)
		goto unregister;

	dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
		 mdma->nr_channels, mdma->nr_threads);

	return 0;

unregister:
	dma_async_device_unregister(&mdma->dma_dev);
suspend:
	if (!pm_runtime_enabled(&pdev->dev))
		img_mdc_runtime_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int mdc_dma_remove(struct platform_device *pdev)
{
	struct mdc_dma *mdma = platform_get_drvdata(pdev);
	struct mdc_chan *mchan, *next;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdma->dma_dev);

	list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&mchan->vc.chan.device_node);

		devm_free_irq(&pdev->dev, mchan->irq, mchan);

		tasklet_kill(&mchan->vc.task);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		img_mdc_runtime_suspend(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_mdc_suspend_late(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);
	int i;

	/* Check that all channels are idle */
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		if (unlikely(mchan->desc))
			return -EBUSY;
	}

	return pm_runtime_force_suspend(dev);
}

static int img_mdc_resume_early(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_mdc_pm_ops = {
	SET_RUNTIME_PM_OPS(img_mdc_runtime_suspend,
			   img_mdc_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late,
				     img_mdc_resume_early)
};

static struct platform_driver mdc_dma_driver = {
	.driver = {
		.name = "img-mdc-dma",
		.pm = &img_mdc_pm_ops,
		.of_match_table = of_match_ptr(mdc_dma_of_match),
	},
	.probe = mdc_dma_probe,
	.remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);

MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");