Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2013 - 2015 Linaro Ltd.
4 * Copyright (c) 2013 Hisilicon Limited.
5 */
6#include <linux/sched.h>
7#include <linux/device.h>
8#include <linux/dma-mapping.h>
9#include <linux/dmapool.h>
10#include <linux/dmaengine.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/slab.h>
17#include <linux/spinlock.h>
18#include <linux/of_device.h>
19#include <linux/of.h>
20#include <linux/clk.h>
21#include <linux/of_dma.h>
22
23#include "virt-dma.h"
24
25#define DRIVER_NAME "k3-dma"
26#define DMA_MAX_SIZE 0x1ffc
27#define DMA_CYCLIC_MAX_PERIOD 0x1000
28#define LLI_BLOCK_SIZE (4 * PAGE_SIZE)
29
30#define INT_STAT 0x00
31#define INT_TC1 0x04
32#define INT_TC2 0x08
33#define INT_ERR1 0x0c
34#define INT_ERR2 0x10
35#define INT_TC1_MASK 0x18
36#define INT_TC2_MASK 0x1c
37#define INT_ERR1_MASK 0x20
38#define INT_ERR2_MASK 0x24
39#define INT_TC1_RAW 0x600
40#define INT_TC2_RAW 0x608
41#define INT_ERR1_RAW 0x610
42#define INT_ERR2_RAW 0x618
43#define CH_PRI 0x688
44#define CH_STAT 0x690
45#define CX_CUR_CNT 0x704
46#define CX_LLI 0x800
47#define CX_CNT1 0x80c
48#define CX_CNT0 0x810
49#define CX_SRC 0x814
50#define CX_DST 0x818
51#define CX_CFG 0x81c
52
53#define CX_LLI_CHAIN_EN 0x2
54#define CX_CFG_EN 0x1
55#define CX_CFG_NODEIRQ BIT(1)
56#define CX_CFG_MEM2PER (0x1 << 2)
57#define CX_CFG_PER2MEM (0x2 << 2)
58#define CX_CFG_SRCINCR (0x1 << 31)
59#define CX_CFG_DSTINCR (0x1 << 30)
60
61struct k3_desc_hw {
62 u32 lli;
63 u32 reserved[3];
64 u32 count;
65 u32 saddr;
66 u32 daddr;
67 u32 config;
68} __aligned(32);
69
70struct k3_dma_desc_sw {
71 struct virt_dma_desc vd;
72 dma_addr_t desc_hw_lli;
73 size_t desc_num;
74 size_t size;
75 struct k3_desc_hw *desc_hw;
76};
77
78struct k3_dma_phy;
79
80struct k3_dma_chan {
81 u32 ccfg;
82 struct virt_dma_chan vc;
83 struct k3_dma_phy *phy;
84 struct list_head node;
85 dma_addr_t dev_addr;
86 enum dma_status status;
87 bool cyclic;
88 struct dma_slave_config slave_config;
89};
90
91struct k3_dma_phy {
92 u32 idx;
93 void __iomem *base;
94 struct k3_dma_chan *vchan;
95 struct k3_dma_desc_sw *ds_run;
96 struct k3_dma_desc_sw *ds_done;
97};
98
99struct k3_dma_dev {
100 struct dma_device slave;
101 void __iomem *base;
102 struct tasklet_struct task;
103 spinlock_t lock;
104 struct list_head chan_pending;
105 struct k3_dma_phy *phy;
106 struct k3_dma_chan *chans;
107 struct clk *clk;
108 struct dma_pool *pool;
109 u32 dma_channels;
110 u32 dma_requests;
111 u32 dma_channel_mask;
112 unsigned int irq;
113};
114
115
116#define K3_FLAG_NOCLK BIT(1)
117
118struct k3dma_soc_data {
119 unsigned long flags;
120};
121
122
123#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
124
125static int k3_dma_config_write(struct dma_chan *chan,
126 enum dma_transfer_direction dir,
127 struct dma_slave_config *cfg);
128
129static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
130{
131 return container_of(chan, struct k3_dma_chan, vc.chan);
132}
133
134static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
135{
136 u32 val = 0;
137
138 if (on) {
139 val = readl_relaxed(phy->base + CX_CFG);
140 val |= CX_CFG_EN;
141 writel_relaxed(val, phy->base + CX_CFG);
142 } else {
143 val = readl_relaxed(phy->base + CX_CFG);
144 val &= ~CX_CFG_EN;
145 writel_relaxed(val, phy->base + CX_CFG);
146 }
147}
148
149static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
150{
151 u32 val = 0;
152
153 k3_dma_pause_dma(phy, false);
154
155 val = 0x1 << phy->idx;
156 writel_relaxed(val, d->base + INT_TC1_RAW);
157 writel_relaxed(val, d->base + INT_TC2_RAW);
158 writel_relaxed(val, d->base + INT_ERR1_RAW);
159 writel_relaxed(val, d->base + INT_ERR2_RAW);
160}
161
162static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
163{
164 writel_relaxed(hw->lli, phy->base + CX_LLI);
165 writel_relaxed(hw->count, phy->base + CX_CNT0);
166 writel_relaxed(hw->saddr, phy->base + CX_SRC);
167 writel_relaxed(hw->daddr, phy->base + CX_DST);
168 writel_relaxed(hw->config, phy->base + CX_CFG);
169}
170
171static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
172{
173 u32 cnt = 0;
174
175 cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
176 cnt &= 0xffff;
177 return cnt;
178}
179
180static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
181{
182 return readl_relaxed(phy->base + CX_LLI);
183}
184
185static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
186{
187 return readl_relaxed(d->base + CH_STAT);
188}
189
190static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
191{
192 if (on) {
193 /* set same priority */
194 writel_relaxed(0x0, d->base + CH_PRI);
195
196 /* unmask irq */
197 writel_relaxed(0xffff, d->base + INT_TC1_MASK);
198 writel_relaxed(0xffff, d->base + INT_TC2_MASK);
199 writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
200 writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
201 } else {
202 /* mask irq */
203 writel_relaxed(0x0, d->base + INT_TC1_MASK);
204 writel_relaxed(0x0, d->base + INT_TC2_MASK);
205 writel_relaxed(0x0, d->base + INT_ERR1_MASK);
206 writel_relaxed(0x0, d->base + INT_ERR2_MASK);
207 }
208}
209
210static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
211{
212 struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
213 struct k3_dma_phy *p;
214 struct k3_dma_chan *c;
215 u32 stat = readl_relaxed(d->base + INT_STAT);
216 u32 tc1 = readl_relaxed(d->base + INT_TC1);
217 u32 tc2 = readl_relaxed(d->base + INT_TC2);
218 u32 err1 = readl_relaxed(d->base + INT_ERR1);
219 u32 err2 = readl_relaxed(d->base + INT_ERR2);
220 u32 i, irq_chan = 0;
221
222 while (stat) {
223 i = __ffs(stat);
224 stat &= ~BIT(i);
225 if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
226 unsigned long flags;
227
228 p = &d->phy[i];
229 c = p->vchan;
230 if (c && (tc1 & BIT(i))) {
231 spin_lock_irqsave(&c->vc.lock, flags);
232 if (p->ds_run != NULL) {
233 vchan_cookie_complete(&p->ds_run->vd);
234 p->ds_done = p->ds_run;
235 p->ds_run = NULL;
236 }
237 spin_unlock_irqrestore(&c->vc.lock, flags);
238 }
239 if (c && (tc2 & BIT(i))) {
240 spin_lock_irqsave(&c->vc.lock, flags);
241 if (p->ds_run != NULL)
242 vchan_cyclic_callback(&p->ds_run->vd);
243 spin_unlock_irqrestore(&c->vc.lock, flags);
244 }
245 irq_chan |= BIT(i);
246 }
247 if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
248 dev_warn(d->slave.dev, "DMA ERR\n");
249 }
250
251 writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
252 writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
253 writel_relaxed(err1, d->base + INT_ERR1_RAW);
254 writel_relaxed(err2, d->base + INT_ERR2_RAW);
255
256 if (irq_chan)
257 tasklet_schedule(&d->task);
258
259 if (irq_chan || err1 || err2)
260 return IRQ_HANDLED;
261
262 return IRQ_NONE;
263}
264
265static int k3_dma_start_txd(struct k3_dma_chan *c)
266{
267 struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
268 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
269
270 if (!c->phy)
271 return -EAGAIN;
272
273 if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
274 return -EAGAIN;
275
276 /* Avoid losing track of ds_run if a transaction is in flight */
277 if (c->phy->ds_run)
278 return -EAGAIN;
279
280 if (vd) {
281 struct k3_dma_desc_sw *ds =
282 container_of(vd, struct k3_dma_desc_sw, vd);
283 /*
284 * fetch and remove request from vc->desc_issued
285 * so vc->desc_issued only contains desc pending
286 */
287 list_del(&ds->vd.node);
288
289 c->phy->ds_run = ds;
290 c->phy->ds_done = NULL;
291 /* start dma */
292 k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
293 return 0;
294 }
295 c->phy->ds_run = NULL;
296 c->phy->ds_done = NULL;
297 return -EAGAIN;
298}
299
300static void k3_dma_tasklet(unsigned long arg)
301{
302 struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
303 struct k3_dma_phy *p;
304 struct k3_dma_chan *c, *cn;
305 unsigned pch, pch_alloc = 0;
306
307 /* check new dma request of running channel in vc->desc_issued */
308 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
309 spin_lock_irq(&c->vc.lock);
310 p = c->phy;
311 if (p && p->ds_done) {
312 if (k3_dma_start_txd(c)) {
313 /* No current txd associated with this channel */
314 dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
315 /* Mark this channel free */
316 c->phy = NULL;
317 p->vchan = NULL;
318 }
319 }
320 spin_unlock_irq(&c->vc.lock);
321 }
322
323 /* check new channel request in d->chan_pending */
324 spin_lock_irq(&d->lock);
325 for (pch = 0; pch < d->dma_channels; pch++) {
326 if (!(d->dma_channel_mask & (1 << pch)))
327 continue;
328
329 p = &d->phy[pch];
330
331 if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
332 c = list_first_entry(&d->chan_pending,
333 struct k3_dma_chan, node);
334 /* remove from d->chan_pending */
335 list_del_init(&c->node);
336 pch_alloc |= 1 << pch;
337 /* Mark this channel allocated */
338 p->vchan = c;
339 c->phy = p;
340 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
341 }
342 }
343 spin_unlock_irq(&d->lock);
344
345 for (pch = 0; pch < d->dma_channels; pch++) {
346 if (!(d->dma_channel_mask & (1 << pch)))
347 continue;
348
349 if (pch_alloc & (1 << pch)) {
350 p = &d->phy[pch];
351 c = p->vchan;
352 if (c) {
353 spin_lock_irq(&c->vc.lock);
354 k3_dma_start_txd(c);
355 spin_unlock_irq(&c->vc.lock);
356 }
357 }
358 }
359}
360
361static void k3_dma_free_chan_resources(struct dma_chan *chan)
362{
363 struct k3_dma_chan *c = to_k3_chan(chan);
364 struct k3_dma_dev *d = to_k3_dma(chan->device);
365 unsigned long flags;
366
367 spin_lock_irqsave(&d->lock, flags);
368 list_del_init(&c->node);
369 spin_unlock_irqrestore(&d->lock, flags);
370
371 vchan_free_chan_resources(&c->vc);
372 c->ccfg = 0;
373}
374
375static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
376 dma_cookie_t cookie, struct dma_tx_state *state)
377{
378 struct k3_dma_chan *c = to_k3_chan(chan);
379 struct k3_dma_dev *d = to_k3_dma(chan->device);
380 struct k3_dma_phy *p;
381 struct virt_dma_desc *vd;
382 unsigned long flags;
383 enum dma_status ret;
384 size_t bytes = 0;
385
386 ret = dma_cookie_status(&c->vc.chan, cookie, state);
387 if (ret == DMA_COMPLETE)
388 return ret;
389
390 spin_lock_irqsave(&c->vc.lock, flags);
391 p = c->phy;
392 ret = c->status;
393
394 /*
395 * If the cookie is on our issue queue, then the residue is
396 * its total size.
397 */
398 vd = vchan_find_desc(&c->vc, cookie);
399 if (vd && !c->cyclic) {
400 bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
401 } else if ((!p) || (!p->ds_run)) {
402 bytes = 0;
403 } else {
404 struct k3_dma_desc_sw *ds = p->ds_run;
405 u32 clli = 0, index = 0;
406
407 bytes = k3_dma_get_curr_cnt(d, p);
408 clli = k3_dma_get_curr_lli(p);
409 index = ((clli - ds->desc_hw_lli) /
410 sizeof(struct k3_desc_hw)) + 1;
411 for (; index < ds->desc_num; index++) {
412 bytes += ds->desc_hw[index].count;
413 /* end of lli */
414 if (!ds->desc_hw[index].lli)
415 break;
416 }
417 }
418 spin_unlock_irqrestore(&c->vc.lock, flags);
419 dma_set_residue(state, bytes);
420 return ret;
421}
422
423static void k3_dma_issue_pending(struct dma_chan *chan)
424{
425 struct k3_dma_chan *c = to_k3_chan(chan);
426 struct k3_dma_dev *d = to_k3_dma(chan->device);
427 unsigned long flags;
428
429 spin_lock_irqsave(&c->vc.lock, flags);
430 /* add request to vc->desc_issued */
431 if (vchan_issue_pending(&c->vc)) {
432 spin_lock(&d->lock);
433 if (!c->phy) {
434 if (list_empty(&c->node)) {
435 /* if new channel, add chan_pending */
436 list_add_tail(&c->node, &d->chan_pending);
437 /* check in tasklet */
438 tasklet_schedule(&d->task);
439 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
440 }
441 }
442 spin_unlock(&d->lock);
443 } else
444 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
445 spin_unlock_irqrestore(&c->vc.lock, flags);
446}
447
448static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
449 dma_addr_t src, size_t len, u32 num, u32 ccfg)
450{
451 if (num != ds->desc_num - 1)
452 ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
453 sizeof(struct k3_desc_hw);
454
455 ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
456 ds->desc_hw[num].count = len;
457 ds->desc_hw[num].saddr = src;
458 ds->desc_hw[num].daddr = dst;
459 ds->desc_hw[num].config = ccfg;
460}
461
462static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
463 struct dma_chan *chan)
464{
465 struct k3_dma_chan *c = to_k3_chan(chan);
466 struct k3_dma_desc_sw *ds;
467 struct k3_dma_dev *d = to_k3_dma(chan->device);
468 int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);
469
470 if (num > lli_limit) {
471 dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
472 &c->vc, num, lli_limit);
473 return NULL;
474 }
475
476 ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
477 if (!ds)
478 return NULL;
479
480 ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
481 if (!ds->desc_hw) {
482 dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
483 kfree(ds);
484 return NULL;
485 }
486 ds->desc_num = num;
487 return ds;
488}
489
490static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
491 struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
492 size_t len, unsigned long flags)
493{
494 struct k3_dma_chan *c = to_k3_chan(chan);
495 struct k3_dma_desc_sw *ds;
496 size_t copy = 0;
497 int num = 0;
498
499 if (!len)
500 return NULL;
501
502 num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
503
504 ds = k3_dma_alloc_desc_resource(num, chan);
505 if (!ds)
506 return NULL;
507
508 c->cyclic = 0;
509 ds->size = len;
510 num = 0;
511
512 if (!c->ccfg) {
513 /* default is memtomem, without calling device_config */
514 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
515 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
516 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
517 }
518
519 do {
520 copy = min_t(size_t, len, DMA_MAX_SIZE);
521 k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
522
523 src += copy;
524 dst += copy;
525 len -= copy;
526 } while (len);
527
528 ds->desc_hw[num-1].lli = 0; /* end of link */
529 return vchan_tx_prep(&c->vc, &ds->vd, flags);
530}
531
532static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
533 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
534 enum dma_transfer_direction dir, unsigned long flags, void *context)
535{
536 struct k3_dma_chan *c = to_k3_chan(chan);
537 struct k3_dma_desc_sw *ds;
538 size_t len, avail, total = 0;
539 struct scatterlist *sg;
540 dma_addr_t addr, src = 0, dst = 0;
541 int num = sglen, i;
542
543 if (sgl == NULL)
544 return NULL;
545
546 c->cyclic = 0;
547
548 for_each_sg(sgl, sg, sglen, i) {
549 avail = sg_dma_len(sg);
550 if (avail > DMA_MAX_SIZE)
551 num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
552 }
553
554 ds = k3_dma_alloc_desc_resource(num, chan);
555 if (!ds)
556 return NULL;
557 num = 0;
558 k3_dma_config_write(chan, dir, &c->slave_config);
559
560 for_each_sg(sgl, sg, sglen, i) {
561 addr = sg_dma_address(sg);
562 avail = sg_dma_len(sg);
563 total += avail;
564
565 do {
566 len = min_t(size_t, avail, DMA_MAX_SIZE);
567
568 if (dir == DMA_MEM_TO_DEV) {
569 src = addr;
570 dst = c->dev_addr;
571 } else if (dir == DMA_DEV_TO_MEM) {
572 src = c->dev_addr;
573 dst = addr;
574 }
575
576 k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
577
578 addr += len;
579 avail -= len;
580 } while (avail);
581 }
582
583 ds->desc_hw[num-1].lli = 0; /* end of link */
584 ds->size = total;
585 return vchan_tx_prep(&c->vc, &ds->vd, flags);
586}
587
588static struct dma_async_tx_descriptor *
589k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
590 size_t buf_len, size_t period_len,
591 enum dma_transfer_direction dir,
592 unsigned long flags)
593{
594 struct k3_dma_chan *c = to_k3_chan(chan);
595 struct k3_dma_desc_sw *ds;
596 size_t len, avail, total = 0;
597 dma_addr_t addr, src = 0, dst = 0;
598 int num = 1, since = 0;
599 size_t modulo = DMA_CYCLIC_MAX_PERIOD;
600 u32 en_tc2 = 0;
601
602 dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
603 __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
604 buf_len, period_len, (int)dir);
605
606 avail = buf_len;
607 if (avail > modulo)
608 num += DIV_ROUND_UP(avail, modulo) - 1;
609
610 ds = k3_dma_alloc_desc_resource(num, chan);
611 if (!ds)
612 return NULL;
613
614 c->cyclic = 1;
615 addr = buf_addr;
616 avail = buf_len;
617 total = avail;
618 num = 0;
619 k3_dma_config_write(chan, dir, &c->slave_config);
620
621 if (period_len < modulo)
622 modulo = period_len;
623
624 do {
625 len = min_t(size_t, avail, modulo);
626
627 if (dir == DMA_MEM_TO_DEV) {
628 src = addr;
629 dst = c->dev_addr;
630 } else if (dir == DMA_DEV_TO_MEM) {
631 src = c->dev_addr;
632 dst = addr;
633 }
634 since += len;
635 if (since >= period_len) {
636 /* descriptor asks for TC2 interrupt on completion */
637 en_tc2 = CX_CFG_NODEIRQ;
638 since -= period_len;
639 } else
640 en_tc2 = 0;
641
642 k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);
643
644 addr += len;
645 avail -= len;
646 } while (avail);
647
648 /* "Cyclic" == end of link points back to start of link */
649 ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;
650
651 ds->size = total;
652
653 return vchan_tx_prep(&c->vc, &ds->vd, flags);
654}
655
656static int k3_dma_config(struct dma_chan *chan,
657 struct dma_slave_config *cfg)
658{
659 struct k3_dma_chan *c = to_k3_chan(chan);
660
661 memcpy(&c->slave_config, cfg, sizeof(*cfg));
662
663 return 0;
664}
665
666static int k3_dma_config_write(struct dma_chan *chan,
667 enum dma_transfer_direction dir,
668 struct dma_slave_config *cfg)
669{
670 struct k3_dma_chan *c = to_k3_chan(chan);
671 u32 maxburst = 0, val = 0;
672 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
673
674 if (dir == DMA_DEV_TO_MEM) {
675 c->ccfg = CX_CFG_DSTINCR;
676 c->dev_addr = cfg->src_addr;
677 maxburst = cfg->src_maxburst;
678 width = cfg->src_addr_width;
679 } else if (dir == DMA_MEM_TO_DEV) {
680 c->ccfg = CX_CFG_SRCINCR;
681 c->dev_addr = cfg->dst_addr;
682 maxburst = cfg->dst_maxburst;
683 width = cfg->dst_addr_width;
684 }
685 switch (width) {
686 case DMA_SLAVE_BUSWIDTH_1_BYTE:
687 case DMA_SLAVE_BUSWIDTH_2_BYTES:
688 case DMA_SLAVE_BUSWIDTH_4_BYTES:
689 case DMA_SLAVE_BUSWIDTH_8_BYTES:
690 val = __ffs(width);
691 break;
692 default:
693 val = 3;
694 break;
695 }
696 c->ccfg |= (val << 12) | (val << 16);
697
698 if ((maxburst == 0) || (maxburst > 16))
699 val = 15;
700 else
701 val = maxburst - 1;
702 c->ccfg |= (val << 20) | (val << 24);
703 c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
704
705 /* specific request line */
706 c->ccfg |= c->vc.chan.chan_id << 4;
707
708 return 0;
709}
710
711static void k3_dma_free_desc(struct virt_dma_desc *vd)
712{
713 struct k3_dma_desc_sw *ds =
714 container_of(vd, struct k3_dma_desc_sw, vd);
715 struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
716
717 dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
718 kfree(ds);
719}
720
721static int k3_dma_terminate_all(struct dma_chan *chan)
722{
723 struct k3_dma_chan *c = to_k3_chan(chan);
724 struct k3_dma_dev *d = to_k3_dma(chan->device);
725 struct k3_dma_phy *p = c->phy;
726 unsigned long flags;
727 LIST_HEAD(head);
728
729 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
730
731 /* Prevent this channel being scheduled */
732 spin_lock(&d->lock);
733 list_del_init(&c->node);
734 spin_unlock(&d->lock);
735
736 /* Clear the tx descriptor lists */
737 spin_lock_irqsave(&c->vc.lock, flags);
738 vchan_get_all_descriptors(&c->vc, &head);
739 if (p) {
740 /* vchan is assigned to a pchan - stop the channel */
741 k3_dma_terminate_chan(p, d);
742 c->phy = NULL;
743 p->vchan = NULL;
744 if (p->ds_run) {
745 vchan_terminate_vdesc(&p->ds_run->vd);
746 p->ds_run = NULL;
747 }
748 p->ds_done = NULL;
749 }
750 spin_unlock_irqrestore(&c->vc.lock, flags);
751 vchan_dma_desc_free_list(&c->vc, &head);
752
753 return 0;
754}
755
756static void k3_dma_synchronize(struct dma_chan *chan)
757{
758 struct k3_dma_chan *c = to_k3_chan(chan);
759
760 vchan_synchronize(&c->vc);
761}
762
763static int k3_dma_transfer_pause(struct dma_chan *chan)
764{
765 struct k3_dma_chan *c = to_k3_chan(chan);
766 struct k3_dma_dev *d = to_k3_dma(chan->device);
767 struct k3_dma_phy *p = c->phy;
768
769 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
770 if (c->status == DMA_IN_PROGRESS) {
771 c->status = DMA_PAUSED;
772 if (p) {
773 k3_dma_pause_dma(p, false);
774 } else {
775 spin_lock(&d->lock);
776 list_del_init(&c->node);
777 spin_unlock(&d->lock);
778 }
779 }
780
781 return 0;
782}
783
784static int k3_dma_transfer_resume(struct dma_chan *chan)
785{
786 struct k3_dma_chan *c = to_k3_chan(chan);
787 struct k3_dma_dev *d = to_k3_dma(chan->device);
788 struct k3_dma_phy *p = c->phy;
789 unsigned long flags;
790
791 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
792 spin_lock_irqsave(&c->vc.lock, flags);
793 if (c->status == DMA_PAUSED) {
794 c->status = DMA_IN_PROGRESS;
795 if (p) {
796 k3_dma_pause_dma(p, true);
797 } else if (!list_empty(&c->vc.desc_issued)) {
798 spin_lock(&d->lock);
799 list_add_tail(&c->node, &d->chan_pending);
800 spin_unlock(&d->lock);
801 }
802 }
803 spin_unlock_irqrestore(&c->vc.lock, flags);
804
805 return 0;
806}
807
808static const struct k3dma_soc_data k3_v1_dma_data = {
809 .flags = 0,
810};
811
812static const struct k3dma_soc_data asp_v1_dma_data = {
813 .flags = K3_FLAG_NOCLK,
814};
815
816static const struct of_device_id k3_pdma_dt_ids[] = {
817 { .compatible = "hisilicon,k3-dma-1.0",
818 .data = &k3_v1_dma_data
819 },
820 { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
821 .data = &asp_v1_dma_data
822 },
823 {}
824};
825MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
826
827static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
828 struct of_dma *ofdma)
829{
830 struct k3_dma_dev *d = ofdma->of_dma_data;
831 unsigned int request = dma_spec->args[0];
832
833 if (request >= d->dma_requests)
834 return NULL;
835
836 return dma_get_slave_channel(&(d->chans[request].vc.chan));
837}
838
839static int k3_dma_probe(struct platform_device *op)
840{
841 const struct k3dma_soc_data *soc_data;
842 struct k3_dma_dev *d;
843 const struct of_device_id *of_id;
844 int i, ret, irq = 0;
845
846 d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
847 if (!d)
848 return -ENOMEM;
849
850 soc_data = device_get_match_data(&op->dev);
851 if (!soc_data)
852 return -EINVAL;
853
854 d->base = devm_platform_ioremap_resource(op, 0);
855 if (IS_ERR(d->base))
856 return PTR_ERR(d->base);
857
858 of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
859 if (of_id) {
860 of_property_read_u32((&op->dev)->of_node,
861 "dma-channels", &d->dma_channels);
862 of_property_read_u32((&op->dev)->of_node,
863 "dma-requests", &d->dma_requests);
864 ret = of_property_read_u32((&op->dev)->of_node,
865 "dma-channel-mask", &d->dma_channel_mask);
866 if (ret) {
867 dev_warn(&op->dev,
868 "dma-channel-mask doesn't exist, considering all as available.\n");
869 d->dma_channel_mask = (u32)~0UL;
870 }
871 }
872
873 if (!(soc_data->flags & K3_FLAG_NOCLK)) {
874 d->clk = devm_clk_get(&op->dev, NULL);
875 if (IS_ERR(d->clk)) {
876 dev_err(&op->dev, "no dma clk\n");
877 return PTR_ERR(d->clk);
878 }
879 }
880
881 irq = platform_get_irq(op, 0);
882 ret = devm_request_irq(&op->dev, irq,
883 k3_dma_int_handler, 0, DRIVER_NAME, d);
884 if (ret)
885 return ret;
886
887 d->irq = irq;
888
889 /* A DMA memory pool for LLIs, align on 32-byte boundary */
890 d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
891 LLI_BLOCK_SIZE, 32, 0);
892 if (!d->pool)
893 return -ENOMEM;
894
895 /* init phy channel */
896 d->phy = devm_kcalloc(&op->dev,
897 d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
898 if (d->phy == NULL)
899 return -ENOMEM;
900
901 for (i = 0; i < d->dma_channels; i++) {
902 struct k3_dma_phy *p;
903
904 if (!(d->dma_channel_mask & BIT(i)))
905 continue;
906
907 p = &d->phy[i];
908 p->idx = i;
909 p->base = d->base + i * 0x40;
910 }
911
912 INIT_LIST_HEAD(&d->slave.channels);
913 dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
914 dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
915 dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
916 d->slave.dev = &op->dev;
917 d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
918 d->slave.device_tx_status = k3_dma_tx_status;
919 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
920 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
921 d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
922 d->slave.device_issue_pending = k3_dma_issue_pending;
923 d->slave.device_config = k3_dma_config;
924 d->slave.device_pause = k3_dma_transfer_pause;
925 d->slave.device_resume = k3_dma_transfer_resume;
926 d->slave.device_terminate_all = k3_dma_terminate_all;
927 d->slave.device_synchronize = k3_dma_synchronize;
928 d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
929
930 /* init virtual channel */
931 d->chans = devm_kcalloc(&op->dev,
932 d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
933 if (d->chans == NULL)
934 return -ENOMEM;
935
936 for (i = 0; i < d->dma_requests; i++) {
937 struct k3_dma_chan *c = &d->chans[i];
938
939 c->status = DMA_IN_PROGRESS;
940 INIT_LIST_HEAD(&c->node);
941 c->vc.desc_free = k3_dma_free_desc;
942 vchan_init(&c->vc, &d->slave);
943 }
944
945 /* Enable clock before accessing registers */
946 ret = clk_prepare_enable(d->clk);
947 if (ret < 0) {
948 dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
949 return ret;
950 }
951
952 k3_dma_enable_dma(d, true);
953
954 ret = dma_async_device_register(&d->slave);
955 if (ret)
956 goto dma_async_register_fail;
957
958 ret = of_dma_controller_register((&op->dev)->of_node,
959 k3_of_dma_simple_xlate, d);
960 if (ret)
961 goto of_dma_register_fail;
962
963 spin_lock_init(&d->lock);
964 INIT_LIST_HEAD(&d->chan_pending);
965 tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
966 platform_set_drvdata(op, d);
967 dev_info(&op->dev, "initialized\n");
968
969 return 0;
970
971of_dma_register_fail:
972 dma_async_device_unregister(&d->slave);
973dma_async_register_fail:
974 clk_disable_unprepare(d->clk);
975 return ret;
976}
977
978static int k3_dma_remove(struct platform_device *op)
979{
980 struct k3_dma_chan *c, *cn;
981 struct k3_dma_dev *d = platform_get_drvdata(op);
982
983 dma_async_device_unregister(&d->slave);
984 of_dma_controller_free((&op->dev)->of_node);
985
986 devm_free_irq(&op->dev, d->irq, d);
987
988 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
989 list_del(&c->vc.chan.device_node);
990 tasklet_kill(&c->vc.task);
991 }
992 tasklet_kill(&d->task);
993 clk_disable_unprepare(d->clk);
994 return 0;
995}
996
997#ifdef CONFIG_PM_SLEEP
998static int k3_dma_suspend_dev(struct device *dev)
999{
1000 struct k3_dma_dev *d = dev_get_drvdata(dev);
1001 u32 stat = 0;
1002
1003 stat = k3_dma_get_chan_stat(d);
1004 if (stat) {
1005 dev_warn(d->slave.dev,
1006 "chan %d is running fail to suspend\n", stat);
1007 return -1;
1008 }
1009 k3_dma_enable_dma(d, false);
1010 clk_disable_unprepare(d->clk);
1011 return 0;
1012}
1013
1014static int k3_dma_resume_dev(struct device *dev)
1015{
1016 struct k3_dma_dev *d = dev_get_drvdata(dev);
1017 int ret = 0;
1018
1019 ret = clk_prepare_enable(d->clk);
1020 if (ret < 0) {
1021 dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
1022 return ret;
1023 }
1024 k3_dma_enable_dma(d, true);
1025 return 0;
1026}
1027#endif
1028
1029static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
1030
1031static struct platform_driver k3_pdma_driver = {
1032 .driver = {
1033 .name = DRIVER_NAME,
1034 .pm = &k3_dma_pmops,
1035 .of_match_table = k3_pdma_dt_ids,
1036 },
1037 .probe = k3_dma_probe,
1038 .remove = k3_dma_remove,
1039};
1040
1041module_platform_driver(k3_pdma_driver);
1042
1043MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
1044MODULE_ALIAS("platform:k3dma");
1045MODULE_LICENSE("GPL v2");
1/*
2 * Copyright (c) 2013 Linaro Ltd.
3 * Copyright (c) 2013 Hisilicon Limited.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/sched.h>
10#include <linux/device.h>
11#include <linux/dmaengine.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/of_device.h>
20#include <linux/of.h>
21#include <linux/clk.h>
22#include <linux/of_dma.h>
23
24#include "virt-dma.h"
25
26#define DRIVER_NAME "k3-dma"
27#define DMA_MAX_SIZE 0x1ffc
28
29#define INT_STAT 0x00
30#define INT_TC1 0x04
31#define INT_ERR1 0x0c
32#define INT_ERR2 0x10
33#define INT_TC1_MASK 0x18
34#define INT_ERR1_MASK 0x20
35#define INT_ERR2_MASK 0x24
36#define INT_TC1_RAW 0x600
37#define INT_ERR1_RAW 0x608
38#define INT_ERR2_RAW 0x610
39#define CH_PRI 0x688
40#define CH_STAT 0x690
41#define CX_CUR_CNT 0x704
42#define CX_LLI 0x800
43#define CX_CNT 0x810
44#define CX_SRC 0x814
45#define CX_DST 0x818
46#define CX_CFG 0x81c
47#define AXI_CFG 0x820
48#define AXI_CFG_DEFAULT 0x201201
49
50#define CX_LLI_CHAIN_EN 0x2
51#define CX_CFG_EN 0x1
52#define CX_CFG_MEM2PER (0x1 << 2)
53#define CX_CFG_PER2MEM (0x2 << 2)
54#define CX_CFG_SRCINCR (0x1 << 31)
55#define CX_CFG_DSTINCR (0x1 << 30)
56
57struct k3_desc_hw {
58 u32 lli;
59 u32 reserved[3];
60 u32 count;
61 u32 saddr;
62 u32 daddr;
63 u32 config;
64} __aligned(32);
65
66struct k3_dma_desc_sw {
67 struct virt_dma_desc vd;
68 dma_addr_t desc_hw_lli;
69 size_t desc_num;
70 size_t size;
71 struct k3_desc_hw desc_hw[0];
72};
73
74struct k3_dma_phy;
75
76struct k3_dma_chan {
77 u32 ccfg;
78 struct virt_dma_chan vc;
79 struct k3_dma_phy *phy;
80 struct list_head node;
81 enum dma_transfer_direction dir;
82 dma_addr_t dev_addr;
83 enum dma_status status;
84};
85
86struct k3_dma_phy {
87 u32 idx;
88 void __iomem *base;
89 struct k3_dma_chan *vchan;
90 struct k3_dma_desc_sw *ds_run;
91 struct k3_dma_desc_sw *ds_done;
92};
93
94struct k3_dma_dev {
95 struct dma_device slave;
96 void __iomem *base;
97 struct tasklet_struct task;
98 spinlock_t lock;
99 struct list_head chan_pending;
100 struct k3_dma_phy *phy;
101 struct k3_dma_chan *chans;
102 struct clk *clk;
103 u32 dma_channels;
104 u32 dma_requests;
105};
106
107#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
108
109static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
110{
111 return container_of(chan, struct k3_dma_chan, vc.chan);
112}
113
114static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
115{
116 u32 val = 0;
117
118 if (on) {
119 val = readl_relaxed(phy->base + CX_CFG);
120 val |= CX_CFG_EN;
121 writel_relaxed(val, phy->base + CX_CFG);
122 } else {
123 val = readl_relaxed(phy->base + CX_CFG);
124 val &= ~CX_CFG_EN;
125 writel_relaxed(val, phy->base + CX_CFG);
126 }
127}
128
129static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
130{
131 u32 val = 0;
132
133 k3_dma_pause_dma(phy, false);
134
135 val = 0x1 << phy->idx;
136 writel_relaxed(val, d->base + INT_TC1_RAW);
137 writel_relaxed(val, d->base + INT_ERR1_RAW);
138 writel_relaxed(val, d->base + INT_ERR2_RAW);
139}
140
141static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
142{
143 writel_relaxed(hw->lli, phy->base + CX_LLI);
144 writel_relaxed(hw->count, phy->base + CX_CNT);
145 writel_relaxed(hw->saddr, phy->base + CX_SRC);
146 writel_relaxed(hw->daddr, phy->base + CX_DST);
147 writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
148 writel_relaxed(hw->config, phy->base + CX_CFG);
149}
150
151static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
152{
153 u32 cnt = 0;
154
155 cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
156 cnt &= 0xffff;
157 return cnt;
158}
159
160static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
161{
162 return readl_relaxed(phy->base + CX_LLI);
163}
164
165static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
166{
167 return readl_relaxed(d->base + CH_STAT);
168}
169
170static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
171{
172 if (on) {
173 /* set same priority */
174 writel_relaxed(0x0, d->base + CH_PRI);
175
176 /* unmask irq */
177 writel_relaxed(0xffff, d->base + INT_TC1_MASK);
178 writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
179 writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
180 } else {
181 /* mask irq */
182 writel_relaxed(0x0, d->base + INT_TC1_MASK);
183 writel_relaxed(0x0, d->base + INT_ERR1_MASK);
184 writel_relaxed(0x0, d->base + INT_ERR2_MASK);
185 }
186}
187
188static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
189{
190 struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
191 struct k3_dma_phy *p;
192 struct k3_dma_chan *c;
193 u32 stat = readl_relaxed(d->base + INT_STAT);
194 u32 tc1 = readl_relaxed(d->base + INT_TC1);
195 u32 err1 = readl_relaxed(d->base + INT_ERR1);
196 u32 err2 = readl_relaxed(d->base + INT_ERR2);
197 u32 i, irq_chan = 0;
198
199 while (stat) {
200 i = __ffs(stat);
201 stat &= (stat - 1);
202 if (likely(tc1 & BIT(i))) {
203 p = &d->phy[i];
204 c = p->vchan;
205 if (c) {
206 unsigned long flags;
207
208 spin_lock_irqsave(&c->vc.lock, flags);
209 vchan_cookie_complete(&p->ds_run->vd);
210 p->ds_done = p->ds_run;
211 spin_unlock_irqrestore(&c->vc.lock, flags);
212 }
213 irq_chan |= BIT(i);
214 }
215 if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
216 dev_warn(d->slave.dev, "DMA ERR\n");
217 }
218
219 writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
220 writel_relaxed(err1, d->base + INT_ERR1_RAW);
221 writel_relaxed(err2, d->base + INT_ERR2_RAW);
222
223 if (irq_chan) {
224 tasklet_schedule(&d->task);
225 return IRQ_HANDLED;
226 } else
227 return IRQ_NONE;
228}
229
230static int k3_dma_start_txd(struct k3_dma_chan *c)
231{
232 struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
233 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
234
235 if (!c->phy)
236 return -EAGAIN;
237
238 if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
239 return -EAGAIN;
240
241 if (vd) {
242 struct k3_dma_desc_sw *ds =
243 container_of(vd, struct k3_dma_desc_sw, vd);
244 /*
245 * fetch and remove request from vc->desc_issued
246 * so vc->desc_issued only contains desc pending
247 */
248 list_del(&ds->vd.node);
249 c->phy->ds_run = ds;
250 c->phy->ds_done = NULL;
251 /* start dma */
252 k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
253 return 0;
254 }
255 c->phy->ds_done = NULL;
256 c->phy->ds_run = NULL;
257 return -EAGAIN;
258}
259
260static void k3_dma_tasklet(unsigned long arg)
261{
262 struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
263 struct k3_dma_phy *p;
264 struct k3_dma_chan *c, *cn;
265 unsigned pch, pch_alloc = 0;
266
267 /* check new dma request of running channel in vc->desc_issued */
268 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
269 spin_lock_irq(&c->vc.lock);
270 p = c->phy;
271 if (p && p->ds_done) {
272 if (k3_dma_start_txd(c)) {
273 /* No current txd associated with this channel */
274 dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
275 /* Mark this channel free */
276 c->phy = NULL;
277 p->vchan = NULL;
278 }
279 }
280 spin_unlock_irq(&c->vc.lock);
281 }
282
283 /* check new channel request in d->chan_pending */
284 spin_lock_irq(&d->lock);
285 for (pch = 0; pch < d->dma_channels; pch++) {
286 p = &d->phy[pch];
287
288 if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
289 c = list_first_entry(&d->chan_pending,
290 struct k3_dma_chan, node);
291 /* remove from d->chan_pending */
292 list_del_init(&c->node);
293 pch_alloc |= 1 << pch;
294 /* Mark this channel allocated */
295 p->vchan = c;
296 c->phy = p;
297 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
298 }
299 }
300 spin_unlock_irq(&d->lock);
301
302 for (pch = 0; pch < d->dma_channels; pch++) {
303 if (pch_alloc & (1 << pch)) {
304 p = &d->phy[pch];
305 c = p->vchan;
306 if (c) {
307 spin_lock_irq(&c->vc.lock);
308 k3_dma_start_txd(c);
309 spin_unlock_irq(&c->vc.lock);
310 }
311 }
312 }
313}
314
315static void k3_dma_free_chan_resources(struct dma_chan *chan)
316{
317 struct k3_dma_chan *c = to_k3_chan(chan);
318 struct k3_dma_dev *d = to_k3_dma(chan->device);
319 unsigned long flags;
320
321 spin_lock_irqsave(&d->lock, flags);
322 list_del_init(&c->node);
323 spin_unlock_irqrestore(&d->lock, flags);
324
325 vchan_free_chan_resources(&c->vc);
326 c->ccfg = 0;
327}
328
329static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
330 dma_cookie_t cookie, struct dma_tx_state *state)
331{
332 struct k3_dma_chan *c = to_k3_chan(chan);
333 struct k3_dma_dev *d = to_k3_dma(chan->device);
334 struct k3_dma_phy *p;
335 struct virt_dma_desc *vd;
336 unsigned long flags;
337 enum dma_status ret;
338 size_t bytes = 0;
339
340 ret = dma_cookie_status(&c->vc.chan, cookie, state);
341 if (ret == DMA_COMPLETE)
342 return ret;
343
344 spin_lock_irqsave(&c->vc.lock, flags);
345 p = c->phy;
346 ret = c->status;
347
348 /*
349 * If the cookie is on our issue queue, then the residue is
350 * its total size.
351 */
352 vd = vchan_find_desc(&c->vc, cookie);
353 if (vd) {
354 bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
355 } else if ((!p) || (!p->ds_run)) {
356 bytes = 0;
357 } else {
358 struct k3_dma_desc_sw *ds = p->ds_run;
359 u32 clli = 0, index = 0;
360
361 bytes = k3_dma_get_curr_cnt(d, p);
362 clli = k3_dma_get_curr_lli(p);
363 index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
364 for (; index < ds->desc_num; index++) {
365 bytes += ds->desc_hw[index].count;
366 /* end of lli */
367 if (!ds->desc_hw[index].lli)
368 break;
369 }
370 }
371 spin_unlock_irqrestore(&c->vc.lock, flags);
372 dma_set_residue(state, bytes);
373 return ret;
374}
375
376static void k3_dma_issue_pending(struct dma_chan *chan)
377{
378 struct k3_dma_chan *c = to_k3_chan(chan);
379 struct k3_dma_dev *d = to_k3_dma(chan->device);
380 unsigned long flags;
381
382 spin_lock_irqsave(&c->vc.lock, flags);
383 /* add request to vc->desc_issued */
384 if (vchan_issue_pending(&c->vc)) {
385 spin_lock(&d->lock);
386 if (!c->phy) {
387 if (list_empty(&c->node)) {
388 /* if new channel, add chan_pending */
389 list_add_tail(&c->node, &d->chan_pending);
390 /* check in tasklet */
391 tasklet_schedule(&d->task);
392 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
393 }
394 }
395 spin_unlock(&d->lock);
396 } else
397 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
398 spin_unlock_irqrestore(&c->vc.lock, flags);
399}
400
401static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
402 dma_addr_t src, size_t len, u32 num, u32 ccfg)
403{
404 if ((num + 1) < ds->desc_num)
405 ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
406 sizeof(struct k3_desc_hw);
407 ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
408 ds->desc_hw[num].count = len;
409 ds->desc_hw[num].saddr = src;
410 ds->desc_hw[num].daddr = dst;
411 ds->desc_hw[num].config = ccfg;
412}
413
414static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
415 struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
416 size_t len, unsigned long flags)
417{
418 struct k3_dma_chan *c = to_k3_chan(chan);
419 struct k3_dma_desc_sw *ds;
420 size_t copy = 0;
421 int num = 0;
422
423 if (!len)
424 return NULL;
425
426 num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
427 ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
428 if (!ds) {
429 dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
430 return NULL;
431 }
432 ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
433 ds->size = len;
434 ds->desc_num = num;
435 num = 0;
436
437 if (!c->ccfg) {
438 /* default is memtomem, without calling device_config */
439 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
440 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
441 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
442 }
443
444 do {
445 copy = min_t(size_t, len, DMA_MAX_SIZE);
446 k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
447
448 if (c->dir == DMA_MEM_TO_DEV) {
449 src += copy;
450 } else if (c->dir == DMA_DEV_TO_MEM) {
451 dst += copy;
452 } else {
453 src += copy;
454 dst += copy;
455 }
456 len -= copy;
457 } while (len);
458
459 ds->desc_hw[num-1].lli = 0; /* end of link */
460 return vchan_tx_prep(&c->vc, &ds->vd, flags);
461}
462
463static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
464 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
465 enum dma_transfer_direction dir, unsigned long flags, void *context)
466{
467 struct k3_dma_chan *c = to_k3_chan(chan);
468 struct k3_dma_desc_sw *ds;
469 size_t len, avail, total = 0;
470 struct scatterlist *sg;
471 dma_addr_t addr, src = 0, dst = 0;
472 int num = sglen, i;
473
474 if (sgl == NULL)
475 return NULL;
476
477 for_each_sg(sgl, sg, sglen, i) {
478 avail = sg_dma_len(sg);
479 if (avail > DMA_MAX_SIZE)
480 num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
481 }
482
483 ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
484 if (!ds) {
485 dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
486 return NULL;
487 }
488 ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
489 ds->desc_num = num;
490 num = 0;
491
492 for_each_sg(sgl, sg, sglen, i) {
493 addr = sg_dma_address(sg);
494 avail = sg_dma_len(sg);
495 total += avail;
496
497 do {
498 len = min_t(size_t, avail, DMA_MAX_SIZE);
499
500 if (dir == DMA_MEM_TO_DEV) {
501 src = addr;
502 dst = c->dev_addr;
503 } else if (dir == DMA_DEV_TO_MEM) {
504 src = c->dev_addr;
505 dst = addr;
506 }
507
508 k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
509
510 addr += len;
511 avail -= len;
512 } while (avail);
513 }
514
515 ds->desc_hw[num-1].lli = 0; /* end of link */
516 ds->size = total;
517 return vchan_tx_prep(&c->vc, &ds->vd, flags);
518}
519
520static int k3_dma_config(struct dma_chan *chan,
521 struct dma_slave_config *cfg)
522{
523 struct k3_dma_chan *c = to_k3_chan(chan);
524 u32 maxburst = 0, val = 0;
525 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
526
527 if (cfg == NULL)
528 return -EINVAL;
529 c->dir = cfg->direction;
530 if (c->dir == DMA_DEV_TO_MEM) {
531 c->ccfg = CX_CFG_DSTINCR;
532 c->dev_addr = cfg->src_addr;
533 maxburst = cfg->src_maxburst;
534 width = cfg->src_addr_width;
535 } else if (c->dir == DMA_MEM_TO_DEV) {
536 c->ccfg = CX_CFG_SRCINCR;
537 c->dev_addr = cfg->dst_addr;
538 maxburst = cfg->dst_maxburst;
539 width = cfg->dst_addr_width;
540 }
541 switch (width) {
542 case DMA_SLAVE_BUSWIDTH_1_BYTE:
543 case DMA_SLAVE_BUSWIDTH_2_BYTES:
544 case DMA_SLAVE_BUSWIDTH_4_BYTES:
545 case DMA_SLAVE_BUSWIDTH_8_BYTES:
546 val = __ffs(width);
547 break;
548 default:
549 val = 3;
550 break;
551 }
552 c->ccfg |= (val << 12) | (val << 16);
553
554 if ((maxburst == 0) || (maxburst > 16))
555 val = 16;
556 else
557 val = maxburst - 1;
558 c->ccfg |= (val << 20) | (val << 24);
559 c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
560
561 /* specific request line */
562 c->ccfg |= c->vc.chan.chan_id << 4;
563
564 return 0;
565}
566
567static int k3_dma_terminate_all(struct dma_chan *chan)
568{
569 struct k3_dma_chan *c = to_k3_chan(chan);
570 struct k3_dma_dev *d = to_k3_dma(chan->device);
571 struct k3_dma_phy *p = c->phy;
572 unsigned long flags;
573 LIST_HEAD(head);
574
575 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
576
577 /* Prevent this channel being scheduled */
578 spin_lock(&d->lock);
579 list_del_init(&c->node);
580 spin_unlock(&d->lock);
581
582 /* Clear the tx descriptor lists */
583 spin_lock_irqsave(&c->vc.lock, flags);
584 vchan_get_all_descriptors(&c->vc, &head);
585 if (p) {
586 /* vchan is assigned to a pchan - stop the channel */
587 k3_dma_terminate_chan(p, d);
588 c->phy = NULL;
589 p->vchan = NULL;
590 p->ds_run = p->ds_done = NULL;
591 }
592 spin_unlock_irqrestore(&c->vc.lock, flags);
593 vchan_dma_desc_free_list(&c->vc, &head);
594
595 return 0;
596}
597
598static int k3_dma_transfer_pause(struct dma_chan *chan)
599{
600 struct k3_dma_chan *c = to_k3_chan(chan);
601 struct k3_dma_dev *d = to_k3_dma(chan->device);
602 struct k3_dma_phy *p = c->phy;
603
604 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
605 if (c->status == DMA_IN_PROGRESS) {
606 c->status = DMA_PAUSED;
607 if (p) {
608 k3_dma_pause_dma(p, false);
609 } else {
610 spin_lock(&d->lock);
611 list_del_init(&c->node);
612 spin_unlock(&d->lock);
613 }
614 }
615
616 return 0;
617}
618
619static int k3_dma_transfer_resume(struct dma_chan *chan)
620{
621 struct k3_dma_chan *c = to_k3_chan(chan);
622 struct k3_dma_dev *d = to_k3_dma(chan->device);
623 struct k3_dma_phy *p = c->phy;
624 unsigned long flags;
625
626 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
627 spin_lock_irqsave(&c->vc.lock, flags);
628 if (c->status == DMA_PAUSED) {
629 c->status = DMA_IN_PROGRESS;
630 if (p) {
631 k3_dma_pause_dma(p, true);
632 } else if (!list_empty(&c->vc.desc_issued)) {
633 spin_lock(&d->lock);
634 list_add_tail(&c->node, &d->chan_pending);
635 spin_unlock(&d->lock);
636 }
637 }
638 spin_unlock_irqrestore(&c->vc.lock, flags);
639
640 return 0;
641}
642
643static void k3_dma_free_desc(struct virt_dma_desc *vd)
644{
645 struct k3_dma_desc_sw *ds =
646 container_of(vd, struct k3_dma_desc_sw, vd);
647
648 kfree(ds);
649}
650
651static const struct of_device_id k3_pdma_dt_ids[] = {
652 { .compatible = "hisilicon,k3-dma-1.0", },
653 {}
654};
655MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
656
657static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
658 struct of_dma *ofdma)
659{
660 struct k3_dma_dev *d = ofdma->of_dma_data;
661 unsigned int request = dma_spec->args[0];
662
663 if (request > d->dma_requests)
664 return NULL;
665
666 return dma_get_slave_channel(&(d->chans[request].vc.chan));
667}
668
669static int k3_dma_probe(struct platform_device *op)
670{
671 struct k3_dma_dev *d;
672 const struct of_device_id *of_id;
673 struct resource *iores;
674 int i, ret, irq = 0;
675
676 iores = platform_get_resource(op, IORESOURCE_MEM, 0);
677 if (!iores)
678 return -EINVAL;
679
680 d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
681 if (!d)
682 return -ENOMEM;
683
684 d->base = devm_ioremap_resource(&op->dev, iores);
685 if (IS_ERR(d->base))
686 return PTR_ERR(d->base);
687
688 of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
689 if (of_id) {
690 of_property_read_u32((&op->dev)->of_node,
691 "dma-channels", &d->dma_channels);
692 of_property_read_u32((&op->dev)->of_node,
693 "dma-requests", &d->dma_requests);
694 }
695
696 d->clk = devm_clk_get(&op->dev, NULL);
697 if (IS_ERR(d->clk)) {
698 dev_err(&op->dev, "no dma clk\n");
699 return PTR_ERR(d->clk);
700 }
701
702 irq = platform_get_irq(op, 0);
703 ret = devm_request_irq(&op->dev, irq,
704 k3_dma_int_handler, 0, DRIVER_NAME, d);
705 if (ret)
706 return ret;
707
708 /* init phy channel */
709 d->phy = devm_kzalloc(&op->dev,
710 d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
711 if (d->phy == NULL)
712 return -ENOMEM;
713
714 for (i = 0; i < d->dma_channels; i++) {
715 struct k3_dma_phy *p = &d->phy[i];
716
717 p->idx = i;
718 p->base = d->base + i * 0x40;
719 }
720
721 INIT_LIST_HEAD(&d->slave.channels);
722 dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
723 dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
724 d->slave.dev = &op->dev;
725 d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
726 d->slave.device_tx_status = k3_dma_tx_status;
727 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
728 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
729 d->slave.device_issue_pending = k3_dma_issue_pending;
730 d->slave.device_config = k3_dma_config;
731 d->slave.device_pause = k3_dma_transfer_pause;
732 d->slave.device_resume = k3_dma_transfer_resume;
733 d->slave.device_terminate_all = k3_dma_terminate_all;
734 d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
735
736 /* init virtual channel */
737 d->chans = devm_kzalloc(&op->dev,
738 d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
739 if (d->chans == NULL)
740 return -ENOMEM;
741
742 for (i = 0; i < d->dma_requests; i++) {
743 struct k3_dma_chan *c = &d->chans[i];
744
745 c->status = DMA_IN_PROGRESS;
746 INIT_LIST_HEAD(&c->node);
747 c->vc.desc_free = k3_dma_free_desc;
748 vchan_init(&c->vc, &d->slave);
749 }
750
751 /* Enable clock before accessing registers */
752 ret = clk_prepare_enable(d->clk);
753 if (ret < 0) {
754 dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
755 return ret;
756 }
757
758 k3_dma_enable_dma(d, true);
759
760 ret = dma_async_device_register(&d->slave);
761 if (ret)
762 return ret;
763
764 ret = of_dma_controller_register((&op->dev)->of_node,
765 k3_of_dma_simple_xlate, d);
766 if (ret)
767 goto of_dma_register_fail;
768
769 spin_lock_init(&d->lock);
770 INIT_LIST_HEAD(&d->chan_pending);
771 tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
772 platform_set_drvdata(op, d);
773 dev_info(&op->dev, "initialized\n");
774
775 return 0;
776
777of_dma_register_fail:
778 dma_async_device_unregister(&d->slave);
779 return ret;
780}
781
782static int k3_dma_remove(struct platform_device *op)
783{
784 struct k3_dma_chan *c, *cn;
785 struct k3_dma_dev *d = platform_get_drvdata(op);
786
787 dma_async_device_unregister(&d->slave);
788 of_dma_controller_free((&op->dev)->of_node);
789
790 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
791 list_del(&c->vc.chan.device_node);
792 tasklet_kill(&c->vc.task);
793 }
794 tasklet_kill(&d->task);
795 clk_disable_unprepare(d->clk);
796 return 0;
797}
798
799#ifdef CONFIG_PM_SLEEP
800static int k3_dma_suspend_dev(struct device *dev)
801{
802 struct k3_dma_dev *d = dev_get_drvdata(dev);
803 u32 stat = 0;
804
805 stat = k3_dma_get_chan_stat(d);
806 if (stat) {
807 dev_warn(d->slave.dev,
808 "chan %d is running fail to suspend\n", stat);
809 return -1;
810 }
811 k3_dma_enable_dma(d, false);
812 clk_disable_unprepare(d->clk);
813 return 0;
814}
815
816static int k3_dma_resume_dev(struct device *dev)
817{
818 struct k3_dma_dev *d = dev_get_drvdata(dev);
819 int ret = 0;
820
821 ret = clk_prepare_enable(d->clk);
822 if (ret < 0) {
823 dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
824 return ret;
825 }
826 k3_dma_enable_dma(d, true);
827 return 0;
828}
829#endif
830
831static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
832
833static struct platform_driver k3_pdma_driver = {
834 .driver = {
835 .name = DRIVER_NAME,
836 .pm = &k3_dma_pmops,
837 .of_match_table = k3_pdma_dt_ids,
838 },
839 .probe = k3_dma_probe,
840 .remove = k3_dma_remove,
841};
842
843module_platform_driver(k3_pdma_driver);
844
845MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
846MODULE_ALIAS("platform:k3dma");
847MODULE_LICENSE("GPL v2");