#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "dmaengine.h"

#define DESC_TYPE	27
#define DESC_TYPE_HOST	0x10
#define DESC_TYPE_TEARD	0x13

#define TD_DESC_IS_RX	(1 << 16)
#define TD_DESC_DMA_NUM	10

#define DESC_LENGTH_BITS_NUM	21

#define DESC_TYPE_USB	(5 << 26)
#define DESC_PD_COMPLETE	(1 << 31)

/* DMA engine */
#define DMA_TDFDQ	4
#define DMA_TXGCR(x)	(0x800 + (x) * 0x20)
#define DMA_RXGCR(x)	(0x808 + (x) * 0x20)
#define RXHPCRA0	4

#define GCR_CHAN_ENABLE		(1 << 31)
#define GCR_TEARDOWN		(1 << 30)
#define GCR_STARV_RETRY		(1 << 24)
#define GCR_DESC_TYPE_HOST	(1 << 14)

/* DMA scheduler */
#define DMA_SCHED_CTRL		0
#define DMA_SCHED_CTRL_EN	(1 << 31)
#define DMA_SCHED_WORD(x)	((x) * 4 + 0x800)

#define SCHED_ENTRY0_CHAN(x)	((x) << 0)
#define SCHED_ENTRY0_IS_RX	(1 << 7)

#define SCHED_ENTRY1_CHAN(x)	((x) << 8)
#define SCHED_ENTRY1_IS_RX	(1 << 15)

#define SCHED_ENTRY2_CHAN(x)	((x) << 16)
#define SCHED_ENTRY2_IS_RX	(1 << 23)

#define SCHED_ENTRY3_CHAN(x)	((x) << 24)
#define SCHED_ENTRY3_IS_RX	(1 << 31)

/* Queue manager */
/* 4 KiB of memory for descriptors, 2 for each endpoint */
#define ALLOC_DECS_NUM		128
#define DESCS_AREAS		1
#define TOTAL_DESCS_NUM		(ALLOC_DECS_NUM * DESCS_AREAS)
#define QMGR_SCRATCH_SIZE	(TOTAL_DESCS_NUM * 4)

#define QMGR_LRAM0_BASE		0x80
#define QMGR_LRAM_SIZE		0x84
#define QMGR_LRAM1_BASE		0x88
#define QMGR_MEMBASE(x)		(0x1000 + (x) * 0x10)
#define QMGR_MEMCTRL(x)		(0x1004 + (x) * 0x10)
#define QMGR_MEMCTRL_IDX_SH	16
#define QMGR_MEMCTRL_DESC_SH	8

#define QMGR_NUM_PEND	5
#define QMGR_PEND(x)	(0x90 + (x) * 4)

#define QMGR_PENDING_SLOT_Q(x)	(x / 32)
#define QMGR_PENDING_BIT_Q(x)	(x % 32)

#define QMGR_QUEUE_A(n)	(0x2000 + (n) * 0x10)
#define QMGR_QUEUE_B(n)	(0x2004 + (n) * 0x10)
#define QMGR_QUEUE_C(n)	(0x2008 + (n) * 0x10)
#define QMGR_QUEUE_D(n)	(0x200c + (n) * 0x10)

/* Glue layer specific */
/* USBSS / USB AM335x */
#define USBSS_IRQ_STATUS	0x28
#define USBSS_IRQ_ENABLER	0x2c
#define USBSS_IRQ_CLEARR	0x30

#define USBSS_IRQ_PD_COMP	(1 << 2)

/* Packet Descriptor */
#define PD2_ZERO_LENGTH		(1 << 19)

struct cppi41_channel {
	struct dma_chan chan;
	struct dma_async_tx_descriptor txd;
	struct cppi41_dd *cdd;
	struct cppi41_desc *desc;
	dma_addr_t desc_phys;
	void __iomem *gcr_reg;
	int is_tx;
	u32 residue;

	unsigned int q_num;
	unsigned int q_comp_num;
	unsigned int port_num;

	unsigned td_retry;
	unsigned td_queued:1;
	unsigned td_seen:1;
	unsigned td_desc_seen:1;

	struct list_head node;		/* Node for pending list */
};

struct cppi41_desc {
	u32 pd0;
	u32 pd1;
	u32 pd2;
	u32 pd3;
	u32 pd4;
	u32 pd5;
	u32 pd6;
	u32 pd7;
} __aligned(32);

struct chan_queues {
	u16 submit;
	u16 complete;
};

struct cppi41_dd {
	struct dma_device ddev;

	void *qmgr_scratch;
	dma_addr_t scratch_phys;

	struct cppi41_desc *cd;
	dma_addr_t descs_phys;
	u32 first_td_desc;
	struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];

	void __iomem *usbss_mem;
	void __iomem *ctrl_mem;
	void __iomem *sched_mem;
	void __iomem *qmgr_mem;
	unsigned int irq;
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;

	struct list_head pending;	/* Pending queued transfers */
	spinlock_t lock;		/* Lock for pending list */

	/* context for suspend/resume */
	unsigned int dma_tdfdq;

	bool is_suspended;
};

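/*
 * Queue 93 is the lowest completion queue this driver uses; the
 * pending-bit scan in cppi41_irq() starts at the 32-bit pending
 * register that contains it.
 */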
#define FIST_COMPLETION_QUEUE	93
static struct chan_queues usb_queues_tx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit = 32, .complete =  93},
	[ 1] = { .submit = 34, .complete =  94},
	[ 2] = { .submit = 36, .complete =  95},
	[ 3] = { .submit = 38, .complete =  96},
	[ 4] = { .submit = 40, .complete =  97},
	[ 5] = { .submit = 42, .complete =  98},
	[ 6] = { .submit = 44, .complete =  99},
	[ 7] = { .submit = 46, .complete = 100},
	[ 8] = { .submit = 48, .complete = 101},
	[ 9] = { .submit = 50, .complete = 102},
	[10] = { .submit = 52, .complete = 103},
	[11] = { .submit = 54, .complete = 104},
	[12] = { .submit = 56, .complete = 105},
	[13] = { .submit = 58, .complete = 106},
	[14] = { .submit = 60, .complete = 107},

	/* USB1 ENDP 1 */
	[15] = { .submit = 62, .complete = 125},
	[16] = { .submit = 64, .complete = 126},
	[17] = { .submit = 66, .complete = 127},
	[18] = { .submit = 68, .complete = 128},
	[19] = { .submit = 70, .complete = 129},
	[20] = { .submit = 72, .complete = 130},
	[21] = { .submit = 74, .complete = 131},
	[22] = { .submit = 76, .complete = 132},
	[23] = { .submit = 78, .complete = 133},
	[24] = { .submit = 80, .complete = 134},
	[25] = { .submit = 82, .complete = 135},
	[26] = { .submit = 84, .complete = 136},
	[27] = { .submit = 86, .complete = 137},
	[28] = { .submit = 88, .complete = 138},
	[29] = { .submit = 90, .complete = 139},
};

static const struct chan_queues usb_queues_rx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit =  1, .complete = 109},
	[ 1] = { .submit =  2, .complete = 110},
	[ 2] = { .submit =  3, .complete = 111},
	[ 3] = { .submit =  4, .complete = 112},
	[ 4] = { .submit =  5, .complete = 113},
	[ 5] = { .submit =  6, .complete = 114},
	[ 6] = { .submit =  7, .complete = 115},
	[ 7] = { .submit =  8, .complete = 116},
	[ 8] = { .submit =  9, .complete = 117},
	[ 9] = { .submit = 10, .complete = 118},
	[10] = { .submit = 11, .complete = 119},
	[11] = { .submit = 12, .complete = 120},
	[12] = { .submit = 13, .complete = 121},
	[13] = { .submit = 14, .complete = 122},
	[14] = { .submit = 15, .complete = 123},

	/* USB1 ENDP 1 */
	[15] = { .submit = 16, .complete = 141},
	[16] = { .submit = 17, .complete = 142},
	[17] = { .submit = 18, .complete = 143},
	[18] = { .submit = 19, .complete = 144},
	[19] = { .submit = 20, .complete = 145},
	[20] = { .submit = 21, .complete = 146},
	[21] = { .submit = 22, .complete = 147},
	[22] = { .submit = 23, .complete = 148},
	[23] = { .submit = 24, .complete = 149},
	[24] = { .submit = 25, .complete = 150},
	[25] = { .submit = 26, .complete = 151},
	[26] = { .submit = 27, .complete = 152},
	[27] = { .submit = 28, .complete = 153},
	[28] = { .submit = 29, .complete = 154},
	[29] = { .submit = 30, .complete = 155},
};

struct cppi_glue_infos {
	irqreturn_t (*isr)(int irq, void *data);
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;
};

static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
{
	return container_of(c, struct cppi41_channel, chan);
}

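/*
 * Map a completed descriptor's bus address back to its channel via
 * chan_busy[] and mark the slot free. Returns NULL if the address
 * does not fall inside our descriptor area.
 */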
static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
{
	struct cppi41_channel *c;
	u32 descs_size;
	u32 desc_num;

	descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;

	if (!((desc >= cdd->descs_phys) &&
			(desc < (cdd->descs_phys + descs_size)))) {
		return NULL;
	}

	desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
	BUG_ON(desc_num >= ALLOC_DECS_NUM);
	c = cdd->chan_busy[desc_num];
	cdd->chan_busy[desc_num] = NULL;

	/* Usecount for chan_busy[], paired with push_desc_queue() */
	pm_runtime_put(cdd->ddev.dev);

	return c;
}

static void cppi_writel(u32 val, void __iomem *mem)
{
	__raw_writel(val, mem);
}

static u32 cppi_readl(void __iomem *mem)
{
	return __raw_readl(mem);
}

static u32 pd_trans_len(u32 val)
{
	return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}

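/*
 * Reading queue register D dequeues one descriptor. The low five bits
 * hold the descriptor size field, so mask them off to recover the
 * 32-byte-aligned descriptor address.
 */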
static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
	u32 desc;

	desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
	desc &= ~0x1f;
	return desc;
}

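/*
 * PD completion interrupt: acknowledge the USBSS status, then walk the
 * queue manager pending registers starting at the slot that holds
 * queue 93. In that first slot, bits for the lower (submit) queues are
 * masked off. Every remaining set bit is a completion queue with a
 * finished descriptor to pop, map back to its channel and complete.
 */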
static irqreturn_t cppi41_irq(int irq, void *data)
{
	struct cppi41_dd *cdd = data;
	struct cppi41_channel *c;
	u32 status;
	int i;

	status = cppi_readl(cdd->usbss_mem + USBSS_IRQ_STATUS);
	if (!(status & USBSS_IRQ_PD_COMP))
		return IRQ_NONE;
	cppi_writel(status, cdd->usbss_mem + USBSS_IRQ_STATUS);

	for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
			i++) {
		u32 val;
		u32 q_num;

		val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
		if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) {
			u32 mask;
			/* set the bit for completion queue 93 */
			mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE);
			/* subtracting one sets all bits for queues below 93 */
			mask--;
			/* invert so only queue 93 and above remain set */
			val &= ~mask;
		}

		if (val)
			__iormb();

		while (val) {
			u32 desc, len;

			/*
			 * This should never trigger, see the comments in
			 * push_desc_queue()
			 */
			WARN_ON(cdd->is_suspended);

			q_num = __fls(val);
			val &= ~(1 << q_num);
			q_num += 32 * i;
			desc = cppi41_pop_desc(cdd, q_num);
			c = desc_to_chan(cdd, desc);
			if (WARN_ON(!c)) {
				pr_err("%s() q %d desc %08x\n", __func__,
						q_num, desc);
				continue;
			}

			if (c->desc->pd2 & PD2_ZERO_LENGTH)
				len = 0;
			else
				len = pd_trans_len(c->desc->pd0);

			c->residue = pd_trans_len(c->desc->pd6) - len;
			dma_cookie_complete(&c->txd);
			dmaengine_desc_get_callback_invoke(&c->txd, NULL);
		}
	}
	return IRQ_HANDLED;
}

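/*
 * tx_submit only assigns a cookie; the descriptor is handed to the
 * hardware later, from cppi41_dma_issue_pending().
 */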
static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);

	return cookie;
}

static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
			__func__, error);
		pm_runtime_put_noidle(cdd->ddev.dev);

		return error;
	}

	dma_cookie_init(chan);
	dma_async_tx_descriptor_init(&c->txd, chan);
	c->txd.tx_submit = cppi41_tx_submit;

	if (!c->is_tx)
		cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);

	return 0;
}

static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);

		return;
	}

	WARN_ON(!list_empty(&cdd->pending));

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}

static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	enum dma_status ret;

	/* lock */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (txstate && ret == DMA_COMPLETE)
		txstate->residue = c->residue;
	/* unlock */

	return ret;
}

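/*
 * Hand one descriptor to the hardware. The queue register D write
 * packs the descriptor size field, (size - 24) / 4, into the low five
 * bits and the bus address into the rest, which is why descriptors
 * must be 32-byte aligned.
 */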
static void push_desc_queue(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	u32 reg;

	c->residue = 0;

	reg = GCR_CHAN_ENABLE;
	if (!c->is_tx) {
		reg |= GCR_STARV_RETRY;
		reg |= GCR_DESC_TYPE_HOST;
		reg |= c->q_comp_num;
	}

	cppi_writel(reg, c->gcr_reg);

	/*
	 * We don't use writel() but __raw_writel() so we have to make sure
	 * that the DMA descriptor in coherent memory made it to the main
	 * memory before starting the dma engine.
	 */
	__iowmb();

	/*
	 * DMA transfers can take at least 200ms to complete with USB mass
	 * storage connected. To prevent autosuspend timeouts, we must use
	 * pm_runtime_get/put() when chan_busy[] is modified. This will get
	 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
	 * outcome of the transfer.
	 */
	pm_runtime_get(cdd->ddev.dev);

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	WARN_ON(cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = c;

	reg = (sizeof(struct cppi41_desc) - 24) / 4;
	reg |= desc_phys;
	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}

/*
 * Caller must hold cdd->lock to prevent push_desc_queue() from being
 * called out of order; both cppi41_dma_issue_pending() and
 * cppi41_runtime_resume() call this function.
 */
static void cppi41_run_queue(struct cppi41_dd *cdd)
{
	struct cppi41_channel *c, *_c;

	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
		push_desc_queue(c);
		list_del(&c->node);
	}
}

static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	unsigned long flags;
	int error;

	error = pm_runtime_get(cdd->ddev.dev);
	if ((error != -EINPROGRESS) && error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);
		dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
			error);

		return;
	}

	spin_lock_irqsave(&cdd->lock, flags);
	list_add_tail(&c->node, &cdd->pending);
	if (!cdd->is_suspended)
		cppi41_run_queue(cdd);
	spin_unlock_irqrestore(&cdd->lock, flags);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}

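/*
 * Helpers for the eight 32-bit words of a host packet descriptor as
 * built here: pd0 carries the descriptor type and transfer length,
 * pd2 the USB type and completion queue, pd3 the packet size, pd4 and
 * pd7 the buffer pointer, and pd6 the completion flag plus buffer
 * size. pd1 and pd5 stay zero.
 */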
static u32 get_host_pd0(u32 length)
{
	u32 reg;

	reg = DESC_TYPE_HOST << DESC_TYPE;
	reg |= length;

	return reg;
}

static u32 get_host_pd1(struct cppi41_channel *c)
{
	u32 reg;

	reg = 0;

	return reg;
}

static u32 get_host_pd2(struct cppi41_channel *c)
{
	u32 reg;

	reg = DESC_TYPE_USB;
	reg |= c->q_comp_num;

	return reg;
}

static u32 get_host_pd3(u32 length)
{
	u32 reg;

	/* PD3 = packet size */
	reg = length;

	return reg;
}

static u32 get_host_pd6(u32 length)
{
	u32 reg;

	/* PD6 buffer size */
	reg = DESC_PD_COMPLETE;
	reg |= length;

	return reg;
}

static u32 get_host_pd4_or_7(u32 addr)
{
	u32 reg;

	reg = addr;

	return reg;
}

static u32 get_host_pd5(void)
{
	u32 reg;

	reg = 0;

	return reg;
}

static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_desc *d;
	struct scatterlist *sg;
	unsigned int i;

	d = c->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		u32 addr;
		u32 len;

		/* We need to use more than one desc once musb supports sg */
		addr = lower_32_bits(sg_dma_address(sg));
		len = sg_dma_len(sg);

		d->pd0 = get_host_pd0(len);
		d->pd1 = get_host_pd1(c);
		d->pd2 = get_host_pd2(c);
		d->pd3 = get_host_pd3(len);
		d->pd4 = get_host_pd4_or_7(addr);
		d->pd5 = get_host_pd5();
		d->pd6 = get_host_pd6(len);
		d->pd7 = get_host_pd4_or_7(addr);

		d++;
	}

	return &c->txd;
}

static void cppi41_compute_td_desc(struct cppi41_desc *d)
{
	d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}

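/*
 * Tear down one channel: queue a teardown descriptor, set GCR_TEARDOWN
 * and then poll the completion queues until both the in-flight
 * transfer descriptor and the teardown descriptor have come back.
 * Returns -EAGAIN while the teardown descriptor is still outstanding,
 * so the caller is expected to retry.
 */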
static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	struct cppi41_desc *td;
	u32 reg;
	u32 desc_phys;
	u32 td_desc_phys;

	td = cdd->cd;
	td += cdd->first_td_desc;

	td_desc_phys = cdd->descs_phys;
	td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);

	if (!c->td_queued) {
		cppi41_compute_td_desc(td);
		__iowmb();

		reg = (sizeof(struct cppi41_desc) - 24) / 4;
		reg |= td_desc_phys;
		cppi_writel(reg, cdd->qmgr_mem +
				QMGR_QUEUE_D(cdd->td_queue.submit));

		reg = GCR_CHAN_ENABLE;
		if (!c->is_tx) {
			reg |= GCR_STARV_RETRY;
			reg |= GCR_DESC_TYPE_HOST;
			reg |= c->q_comp_num;
		}
		reg |= GCR_TEARDOWN;
		cppi_writel(reg, c->gcr_reg);
		c->td_queued = 1;
		c->td_retry = 500;
	}

	if (!c->td_seen || !c->td_desc_seen) {

		desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
		if (!desc_phys)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);

		if (desc_phys == c->desc_phys) {
			c->td_desc_seen = 1;

		} else if (desc_phys == td_desc_phys) {
			u32 pd0;

			__iormb();
			pd0 = td->pd0;
			WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
			WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
			WARN_ON((pd0 & 0x1f) != c->port_num);
			c->td_seen = 1;
		} else if (desc_phys) {
			WARN_ON_ONCE(1);
		}
	}
	c->td_retry--;
	/*
	 * If the TX descriptor / channel is in use, the caller needs to poke
	 * the TD bit multiple times. After that the hardware releases the
	 * transfer descriptor followed by the TD descriptor. Waiting seems
	 * not to make any difference.
	 * RX seems to be thrown out right away. However once the TearDown
	 * descriptor gets through we are done. If we have seen the transfer
	 * descriptor before the TD we fetch it from the submit queue; it has
	 * to be there waiting for us.
	 */
	if (!c->td_seen && c->td_retry) {
		udelay(1);
		return -EAGAIN;
	}
	WARN_ON(!c->td_retry);

	if (!c->td_desc_seen) {
		desc_phys = cppi41_pop_desc(cdd, c->q_num);
		if (!desc_phys)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
		WARN_ON(!desc_phys);
	}

	c->td_queued = 0;
	c->td_seen = 0;
	c->td_desc_seen = 0;
	cppi_writel(0, c->gcr_reg);
	return 0;
}

static int cppi41_stop_chan(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	int ret;

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	if (!cdd->chan_busy[desc_num])
		return 0;

	ret = cppi41_tear_down_chan(c);
	if (ret)
		return ret;

	WARN_ON(!cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = NULL;

	/* Usecount for chan_busy[], paired with push_desc_queue() */
	pm_runtime_put(cdd->ddev.dev);

	return 0;
}

static void cleanup_chans(struct cppi41_dd *cdd)
{
	while (!list_empty(&cdd->ddev.channels)) {
		struct cppi41_channel *cchan;

		cchan = list_first_entry(&cdd->ddev.channels,
				struct cppi41_channel, chan.device_node);
		list_del(&cchan->chan.device_node);
		kfree(cchan);
	}
}

static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
	struct cppi41_channel *cchan;
	int i;
	int ret;
	u32 n_chans;

	ret = of_property_read_u32(dev->of_node, "#dma-channels",
			&n_chans);
	if (ret)
		return ret;
	/*
	 * Each hardware channel can only be used as TX or as RX, so
	 * register twice as many DMA channels: one TX and one RX channel
	 * per USB port.
	 */
	n_chans *= 2;

	for (i = 0; i < n_chans; i++) {
		cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
		if (!cchan)
			goto err;

		cchan->cdd = cdd;
		if (i & 1) {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
			cchan->is_tx = 1;
		} else {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
			cchan->is_tx = 0;
		}
		cchan->port_num = i >> 1;
		cchan->desc = &cdd->cd[i];
		cchan->desc_phys = cdd->descs_phys;
		cchan->desc_phys += i * sizeof(struct cppi41_desc);
		cchan->chan.device = &cdd->ddev;
		list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
	}
	cdd->first_td_desc = n_chans;

	return 0;
err:
	cleanup_chans(cdd);
	return -ENOMEM;
}

static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int mem_decs;
	int i;

	mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);

	for (i = 0; i < DESCS_AREAS; i++) {

		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		dma_free_coherent(dev, mem_decs, cdd->cd,
				cdd->descs_phys);
	}
}

static void disable_sched(struct cppi41_dd *cdd)
{
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}

static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	disable_sched(cdd);

	purge_descs(dev, cdd);

	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
	dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
			cdd->scratch_phys);
}

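/*
 * Allocate the descriptor area and describe it to the queue manager.
 * QMGR_MEMCTRL packs the region's start index (bits 16 and up), a
 * descriptor size code of ilog2(size) - 5 (bits 8 and up) and a region
 * size code of ilog2(count) - 5 into one register per region.
 */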
static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int desc_size;
	unsigned int mem_decs;
	int i;
	u32 reg;
	u32 idx;

	BUILD_BUG_ON(sizeof(struct cppi41_desc) &
			(sizeof(struct cppi41_desc) - 1));
	BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
	BUILD_BUG_ON(ALLOC_DECS_NUM < 32);

	desc_size = sizeof(struct cppi41_desc);
	mem_decs = ALLOC_DECS_NUM * desc_size;

	idx = 0;
	for (i = 0; i < DESCS_AREAS; i++) {

		reg = idx << QMGR_MEMCTRL_IDX_SH;
		reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
		reg |= ilog2(ALLOC_DECS_NUM) - 5;

		BUILD_BUG_ON(DESCS_AREAS != 1);
		cdd->cd = dma_alloc_coherent(dev, mem_decs,
				&cdd->descs_phys, GFP_KERNEL);
		if (!cdd->cd)
			return -ENOMEM;

		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		idx += ALLOC_DECS_NUM;
	}
	return 0;
}

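/*
 * Program the scheduler table. Each 32-bit scheduler word carries four
 * entries, interleaving TX and RX, so word n schedules TX and RX for
 * channels 2n and 2n + 1. The control register takes the index of the
 * last entry (15 * 2 * 2 - 1) together with the enable bit.
 */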
static void init_sched(struct cppi41_dd *cdd)
{
	unsigned ch;
	unsigned word;
	u32 reg;

	word = 0;
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
	for (ch = 0; ch < 15 * 2; ch += 2) {

		reg = SCHED_ENTRY0_CHAN(ch);
		reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;

		reg |= SCHED_ENTRY2_CHAN(ch + 1);
		reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
		cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
		word++;
	}
	reg = 15 * 2 * 2 - 1;
	reg |= DMA_SCHED_CTRL_EN;
	cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}

static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	int ret;

	BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
	cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
			&cdd->scratch_phys, GFP_KERNEL);
	if (!cdd->qmgr_scratch)
		return -ENOMEM;

	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	ret = init_descs(dev, cdd);
	if (ret)
		goto err_td;

	cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
	init_sched(cdd);
	return 0;
err_td:
	deinit_cppi41(dev, cdd);
	return ret;
}

static struct platform_driver cpp41_dma_driver;
/*
 * The param format is:
 * X Y
 * X: Port
 * Y: 0 = RX else TX
 */
#define INFO_PORT	0
#define INFO_IS_TX	1

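/*
 * A consumer node would therefore reference a channel roughly like
 * this (illustrative only; the controller phandle and channel names
 * are placeholders, not taken from a specific binding):
 *
 *	dmas = <&cppi41dma 0 0>, <&cppi41dma 0 1>;
 *	dma-names = "rx0", "tx0";
 */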
static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct cppi41_channel *cchan;
	struct cppi41_dd *cdd;
	const struct chan_queues *queues;
	u32 *num = param;

	if (chan->device->dev->driver != &cpp41_dma_driver.driver)
		return false;

	cchan = to_cpp41_chan(chan);

	if (cchan->port_num != num[INFO_PORT])
		return false;

	if (cchan->is_tx && !num[INFO_IS_TX])
		return false;
	cdd = cchan->cdd;
	if (cchan->is_tx)
		queues = cdd->queues_tx;
	else
		queues = cdd->queues_rx;

	BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
	if (WARN_ON(cchan->port_num >= ARRAY_SIZE(usb_queues_rx)))
		return false;

	cchan->q_num = queues[cchan->port_num].submit;
	cchan->q_comp_num = queues[cchan->port_num].complete;
	return true;
}

static struct of_dma_filter_info cpp41_dma_info = {
	.filter_fn = cpp41_dma_filter_fn,
};

static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct of_dma_filter_info *info = ofdma->of_dma_data;

	if (!info || !info->filter_fn)
		return NULL;

	if (count != 2)
		return NULL;

	return dma_request_channel(info->dma_cap, info->filter_fn,
			&dma_spec->args[0]);
}

static const struct cppi_glue_infos usb_infos = {
	.isr = cppi41_irq,
	.queues_rx = usb_queues_rx,
	.queues_tx = usb_queues_tx,
	.td_queue = { .submit = 31, .complete = 0 },
};

static const struct of_device_id cppi41_dma_ids[] = {
	{ .compatible = "ti,am3359-cppi41", .data = &usb_infos},
	{},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);

static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
	const struct of_device_id *of_id;

	of_id = of_match_node(cppi41_dma_ids, dev->of_node);
	if (!of_id)
		return NULL;
	return of_id->data;
}

#define CPPI41_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int cppi41_dma_probe(struct platform_device *pdev)
{
	struct cppi41_dd *cdd;
	struct device *dev = &pdev->dev;
	const struct cppi_glue_infos *glue_info;
	int irq;
	int ret;

	glue_info = get_glue_info(dev);
	if (!glue_info)
		return -EINVAL;

	cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
	if (!cdd)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
	cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
	cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
	cdd->ddev.device_tx_status = cppi41_dma_tx_status;
	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
	cdd->ddev.device_terminate_all = cppi41_stop_chan;
	cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
	cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
	cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	cdd->ddev.dev = dev;
	INIT_LIST_HEAD(&cdd->ddev.channels);
	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;

	cdd->usbss_mem = of_iomap(dev->of_node, 0);
	cdd->ctrl_mem = of_iomap(dev->of_node, 1);
	cdd->sched_mem = of_iomap(dev->of_node, 2);
	cdd->qmgr_mem = of_iomap(dev->of_node, 3);
	spin_lock_init(&cdd->lock);
	INIT_LIST_HEAD(&cdd->pending);

	platform_set_drvdata(pdev, cdd);

	if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
			!cdd->qmgr_mem)
		return -ENXIO;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_get_sync;

	cdd->queues_rx = glue_info->queues_rx;
	cdd->queues_tx = glue_info->queues_tx;
	cdd->td_queue = glue_info->td_queue;

	ret = init_cppi41(dev, cdd);
	if (ret)
		goto err_init_cppi;

	ret = cppi41_add_chans(dev, cdd);
	if (ret)
		goto err_chans;

	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		ret = -EINVAL;
		goto err_irq;
	}

	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);

	ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED,
			dev_name(dev), cdd);
	if (ret)
		goto err_irq;
	cdd->irq = irq;

	ret = dma_async_device_register(&cdd->ddev);
	if (ret)
		goto err_dma_reg;

	ret = of_dma_controller_register(dev->of_node,
			cppi41_dma_xlate, &cpp41_dma_info);
	if (ret)
		goto err_of;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
err_of:
	dma_async_device_unregister(&cdd->ddev);
err_dma_reg:
err_irq:
	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	cleanup_chans(cdd);
err_chans:
	deinit_cppi41(dev, cdd);
err_init_cppi:
	pm_runtime_dont_use_autosuspend(dev);
err_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
	iounmap(cdd->qmgr_mem);
	return ret;
}

static int cppi41_dma_remove(struct platform_device *pdev)
{
	struct cppi41_dd *cdd = platform_get_drvdata(pdev);
	int error;

	error = pm_runtime_get_sync(&pdev->dev);
	if (error < 0)
		dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
			__func__, error);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&cdd->ddev);

	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	devm_free_irq(&pdev->dev, cdd->irq, cdd);
	cleanup_chans(cdd);
	deinit_cppi41(&pdev->dev, cdd);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
	iounmap(cdd->qmgr_mem);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static int __maybe_unused cppi41_suspend(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);

	cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	disable_sched(cdd);

	return 0;
}

static int __maybe_unused cppi41_resume(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);
	struct cppi41_channel *c;
	int i;

	for (i = 0; i < DESCS_AREAS; i++)
		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));

	list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
		if (!c->is_tx)
			cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	init_sched(cdd);

	cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);

	return 0;
}

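/*
 * Runtime PM: while the device is runtime-suspended,
 * cppi41_dma_issue_pending() only queues channels on cdd->pending;
 * cppi41_runtime_resume() clears is_suspended and flushes that list
 * under cdd->lock, so no descriptor is ever pushed to a powered-down
 * queue manager.
 */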
static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&cdd->lock, flags);
	cdd->is_suspended = true;
	WARN_ON(!list_empty(&cdd->pending));
	spin_unlock_irqrestore(&cdd->lock, flags);

	return 0;
}

static int __maybe_unused cppi41_runtime_resume(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&cdd->lock, flags);
	cdd->is_suspended = false;
	cppi41_run_queue(cdd);
	spin_unlock_irqrestore(&cdd->lock, flags);

	return 0;
}

static const struct dev_pm_ops cppi41_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
	SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
			cppi41_runtime_resume,
			NULL)
};

static struct platform_driver cpp41_dma_driver = {
	.probe = cppi41_dma_probe,
	.remove = cppi41_dma_remove,
	.driver = {
		.name = "cppi41-dma-engine",
		.pm = &cppi41_pm_ops,
		.of_match_table = of_match_ptr(cppi41_dma_ids),
	},
};

module_platform_driver(cpp41_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");