Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * DMA driver for STMicroelectronics STi FDMA controller
4 *
5 * Copyright (C) 2014 STMicroelectronics
6 *
7 * Author: Ludovic Barre <Ludovic.barre@st.com>
8 * Peter Griffin <peter.griffin@linaro.org>
9 */
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/of_device.h>
14#include <linux/of_dma.h>
15#include <linux/platform_device.h>
16#include <linux/interrupt.h>
17#include <linux/remoteproc.h>
18#include <linux/slab.h>
19
20#include "st_fdma.h"
21
/* Convert a generic dma_chan into its enclosing st_fdma_chan. */
static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
{
	return container_of(c, struct st_fdma_chan, vchan.chan);
}
26
/* Convert a virt-dma descriptor into its enclosing st_fdma_desc. */
static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct st_fdma_desc, vdesc);
}
31
/*
 * Reserve the DMA request line configured in fchan->cfg.req_line.
 *
 * Returns the reserved dreq line number on success, or -EINVAL if the
 * requested line is out of range or already in use.
 */
static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;
	u32 req_line_cfg = fchan->cfg.req_line;
	u32 dreq_line;
	int try = 0;

	/*
	 * dreq_mask is shared for n channels of fdma, so all accesses must be
	 * atomic. if the dreq_mask is changed between ffz and set_bit,
	 * we retry
	 */
	do {
		if (fdev->dreq_mask == ~0L) {
			dev_err(fdev->dev, "No req lines available\n");
			return -EINVAL;
		}

		/* a second pass means test_and_set_bit lost the race */
		if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
			dev_err(fdev->dev, "Invalid or used req line\n");
			return -EINVAL;
		} else {
			dreq_line = req_line_cfg;
		}

		try++;
	} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));

	dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
		dreq_line, fdev->dreq_mask);

	return dreq_line;
}
65
/* Release the dreq line previously reserved by st_fdma_dreq_get(). */
static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;

	dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
	clear_bit(fchan->dreq_line, &fdev->dreq_mask);
}
73
/*
 * Program the first node of the next queued descriptor into the channel
 * registers and kick the channel. Both call sites (IRQ handler and
 * issue_pending) hold the vchan lock.
 */
static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
{
	struct virt_dma_desc *vdesc;
	unsigned long nbytes, ch_cmd, cmd;

	vdesc = vchan_next_desc(&fchan->vchan);
	if (!vdesc)
		return;		/* nothing queued */

	fchan->fdesc = to_st_fdma_desc(vdesc);
	nbytes = fchan->fdesc->node[0].desc->nbytes;
	cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
	/* CH_CMD carries the physical address of node 0 plus the START state */
	ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;

	/* start the channel for the descriptor */
	fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
	fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
	writel(cmd,
	       fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);

	dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
}
96
/*
 * Refresh fchan->status from the channel command/status register.
 * An error interrupt forces DMA_ERROR; otherwise only the PAUSED and
 * RUNNING hardware states are mapped — any other state intentionally
 * leaves the previous software status untouched.
 */
static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
				  unsigned long int_sta)
{
	unsigned long ch_sta, ch_err;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
	ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
	ch_sta &= FDMA_CH_CMD_STA_MASK;

	if (int_sta & FDMA_INT_STA_ERR) {
		dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
		fchan->status = DMA_ERROR;
		return;
	}

	switch (ch_sta) {
	case FDMA_CH_CMD_STA_PAUSED:
		fchan->status = DMA_PAUSED;
		break;

	case FDMA_CH_CMD_STA_RUNNING:
		fchan->status = DMA_IN_PROGRESS;
		break;
	}
}
124
/*
 * Shared interrupt handler for all channels. The status register holds
 * two bits per channel (transfer-done and error), so the loop shifts
 * int_sta right by two and advances fchan for every channel.
 */
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
	struct st_fdma_dev *fdev = dev_id;
	irqreturn_t ret = IRQ_NONE;
	struct st_fdma_chan *fchan = &fdev->chans[0];
	unsigned long int_sta, clr;

	int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
	clr = int_sta;	/* remember all raised bits so we can ack them below */

	for (; int_sta != 0 ; int_sta >>= 2, fchan++) {
		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
			continue;

		spin_lock(&fchan->vchan.lock);
		st_fdma_ch_sta_update(fchan, int_sta);

		if (fchan->fdesc) {
			if (!fchan->fdesc->iscyclic) {
				/* one-shot transfer: retire the cookie */
				list_del(&fchan->fdesc->vdesc.node);
				vchan_cookie_complete(&fchan->fdesc->vdesc);
				fchan->fdesc = NULL;
				fchan->status = DMA_COMPLETE;
			} else {
				vchan_cyclic_callback(&fchan->fdesc->vdesc);
			}

			/* Start the next descriptor (if available) */
			if (!fchan->fdesc)
				st_fdma_xfer_desc(fchan);
		}

		spin_unlock(&fchan->vchan.lock);
		ret = IRQ_HANDLED;
	}

	fdma_write(fdev, clr, FDMA_INT_CLR_OFST);

	return ret;
}
165
/*
 * Translate a DT dma-spec into a channel. args[0] is the request line;
 * optional args[1] is the request control word, optional args[2] the
 * transfer type. Boots the SLIM core first and shuts it back down on
 * any failure.
 */
static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct st_fdma_dev *fdev = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct st_fdma_chan *fchan;
	int ret;

	if (dma_spec->args_count < 1)
		return ERR_PTR(-EINVAL);

	if (fdev->dma_device.dev->of_node != dma_spec->np)
		return ERR_PTR(-EINVAL);

	ret = rproc_boot(fdev->slim_rproc->rproc);
	/* -ENOENT means the firmware is not available yet: retry later */
	if (ret == -ENOENT)
		return ERR_PTR(-EPROBE_DEFER);
	else if (ret)
		return ERR_PTR(ret);

	chan = dma_get_any_slave_channel(&fdev->dma_device);
	if (!chan)
		goto err_chan;

	fchan = to_st_fdma_chan(chan);

	fchan->cfg.of_node = dma_spec->np;
	fchan->cfg.req_line = dma_spec->args[0];
	fchan->cfg.req_ctrl = 0;
	fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;

	if (dma_spec->args_count > 1)
		fchan->cfg.req_ctrl = dma_spec->args[1]
			& FDMA_REQ_CTRL_CFG_MASK;

	if (dma_spec->args_count > 2)
		fchan->cfg.type = dma_spec->args[2];

	if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
		fchan->dreq_line = 0;	/* free-running channels need no dreq */
	} else {
		fchan->dreq_line = st_fdma_dreq_get(fchan);
		if (IS_ERR_VALUE(fchan->dreq_line)) {
			chan = ERR_PTR(fchan->dreq_line);
			goto err_chan;
		}
	}

	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
		fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);

	return chan;

err_chan:
	/* balance the rproc_boot() above */
	rproc_shutdown(fdev->slim_rproc->rproc);
	return chan;

}
224
/*
 * virt-dma desc_free callback: return every hardware node to the
 * channel's DMA pool, then free the software descriptor itself.
 */
static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = to_st_fdma_desc(vdesc);
	for (i = 0; i < fdesc->n_nodes; i++)
		dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
}
236
237static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
238 int sg_len)
239{
240 struct st_fdma_desc *fdesc;
241 int i;
242
243 fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT);
244 if (!fdesc)
245 return NULL;
246
247 fdesc->fchan = fchan;
248 fdesc->n_nodes = sg_len;
249 for (i = 0; i < sg_len; i++) {
250 fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
251 GFP_NOWAIT, &fdesc->node[i].pdesc);
252 if (!fdesc->node[i].desc)
253 goto err;
254 }
255 return fdesc;
256
257err:
258 while (--i >= 0)
259 dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
260 fdesc->node[i].pdesc);
261 kfree(fdesc);
262 return NULL;
263}
264
/*
 * dmaengine alloc_chan_resources callback: create the per-channel DMA
 * pool used for hardware node allocation.
 *
 * Returns 0 on success, -ENOMEM if the pool cannot be created.
 */
static int st_fdma_alloc_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	/* Create the dma pool for descriptor allocation */
	fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
					    fchan->fdev->dev,
					    sizeof(struct st_fdma_hw_node),
					    __alignof__(struct st_fdma_hw_node),
					    0);

	if (!fchan->node_pool) {
		dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
		return -ENOMEM;
	}

	dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
		fchan->vchan.chan.chan_id, fchan->cfg.type);

	return 0;
}
286
/*
 * dmaengine free_chan_resources callback: release the dreq line (if one
 * was reserved), drop the in-flight descriptor pointer under the vchan
 * lock, destroy the node pool and let the SLIM core shut down.
 */
static void st_fdma_free_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
	unsigned long flags;

	dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
		__func__, fchan->vchan.chan.chan_id);

	if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
		st_fdma_dreq_put(fchan);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fchan->fdesc = NULL;
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	dma_pool_destroy(fchan->node_pool);
	fchan->node_pool = NULL;
	memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));

	/* balances the rproc_boot() done in st_fdma_of_xlate() */
	rproc_shutdown(rproc);
}
309
/*
 * dmaengine prep_dma_memcpy callback: build a single free-running node
 * copying @len bytes from @src to @dst with both addresses incrementing.
 * fdesc->iscyclic is left false by the kzalloc in st_fdma_alloc_desc().
 *
 * Returns the prepared tx descriptor, or NULL on zero length / OOM.
 */
static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;

	if (!len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	/* We only require a single descriptor */
	fdesc = st_fdma_alloc_desc(fchan, 1);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	hw_node = fdesc->node[0].desc;
	hw_node->next = 0;	/* single node: no chaining */
	hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
	hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
	hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;	/* interrupt when done */
	hw_node->nbytes = len;
	hw_node->saddr = src;
	hw_node->daddr = dst;
	hw_node->generic.length = len;
	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
345
/*
 * Program the request-control register for a slave transfer from the
 * cached dma_slave_config: direction (read/write), bus width opcode and
 * burst size. Also latches the device address and direction into
 * fchan->cfg for the prep callbacks.
 *
 * Returns 0 on success, -EINVAL on an unsupported direction or width.
 */
static int config_reqctrl(struct st_fdma_chan *fchan,
			  enum dma_transfer_direction direction)
{
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	switch (direction) {

	case DMA_DEV_TO_MEM:
		fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.src_maxburst;
		width = fchan->scfg.src_addr_width;
		addr = fchan->scfg.src_addr;
		break;

	case DMA_MEM_TO_DEV:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.dst_maxburst;
		width = fchan->scfg.dst_addr_width;
		addr = fchan->scfg.dst_addr;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;

	switch (width) {

	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
		break;

	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
	/* NOTE(review): assumes maxburst >= 1 — a zero maxburst underflows
	 * here before masking; confirm callers always set it. */
	fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst-1);
	dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);

	fchan->cfg.dev_addr = addr;
	fchan->cfg.dir = direction;

	dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
		ch_id, addr, fchan->cfg.req_ctrl);

	return 0;
}
410
/*
 * Fill the direction-dependent fields of a hardware node: the device
 * side keeps a static address, the memory side increments. The
 * memory-side address itself is set by the caller.
 */
static void fill_hw_node(struct st_fdma_hw_node *hw_node,
			 struct st_fdma_chan *fchan,
			 enum dma_transfer_direction direction)
{
	if (direction == DMA_MEM_TO_DEV) {
		hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
		hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
		hw_node->daddr = fchan->cfg.dev_addr;
	} else {
		hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
		hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
		hw_node->saddr = fchan->cfg.dev_addr;
	}

	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;
}
428
429static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
430 size_t len, enum dma_transfer_direction direction)
431{
432 struct st_fdma_chan *fchan;
433
434 if (!chan || !len)
435 return NULL;
436
437 fchan = to_st_fdma_chan(chan);
438
439 if (!is_slave_direction(direction)) {
440 dev_err(fchan->fdev->dev, "bad direction?\n");
441 return NULL;
442 }
443
444 return fchan;
445}
446
/*
 * dmaengine prep_dma_cyclic callback: split the buffer into len/period_len
 * nodes, chained circularly via (i + 1) % sg_len so the hardware loops
 * forever. Each node raises an end-of-node interrupt, driving
 * vchan_cyclic_callback() once per period.
 *
 * Returns the prepared tx descriptor, or NULL on invalid parameters / OOM.
 */
static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	int sg_len, i;

	fchan = st_fdma_prep_common(chan, len, direction);
	if (!fchan)
		return NULL;

	if (!period_len)
		return NULL;

	if (config_reqctrl(fchan, direction)) {
		dev_err(fchan->fdev->dev, "bad width or direction\n");
		return NULL;
	}

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0) {
		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
		return NULL;
	}

	sg_len = len / period_len;
	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = true;

	for (i = 0; i < sg_len; i++) {
		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;

		/* wrap the last node back to the first: endless ring */
		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;

		hw_node->control =
			FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
		hw_node->control |= FDMA_NODE_CTRL_INT_EON;

		fill_hw_node(hw_node, fchan, direction);

		/* memory-side address advances one period per node */
		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = buf_addr + (i * period_len);
		else
			hw_node->daddr = buf_addr + (i * period_len);

		hw_node->nbytes = period_len;
		hw_node->generic.length = period_len;
	}

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
505
/*
 * dmaengine prep_slave_sg callback: one hardware node per scatterlist
 * entry, chained in order. Only the final node carries the end-of-node
 * interrupt flag, so a single completion fires for the whole list.
 *
 * Returns the prepared tx descriptor, or NULL on invalid parameters / OOM.
 */
static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;
	struct scatterlist *sg;
	int i;

	fchan = st_fdma_prep_common(chan, sg_len, direction);
	if (!fchan)
		return NULL;

	if (!sgl)
		return NULL;

	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = false;

	for_each_sg(sgl, sg, sg_len, i) {
		hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
		hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = sg_dma_address(sg);
		else
			hw_node->daddr = sg_dma_address(sg);

		hw_node->nbytes = sg_dma_len(sg);
		hw_node->generic.length = sg_dma_len(sg);
	}

	/* interrupt at end of last node */
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
554
/*
 * Accumulate the bytes still to be transferred for the active
 * descriptor. Nodes are walked from the tail; when the node the
 * hardware is currently processing is found (its physical address
 * matches the CH_CMD data field), its live byte counter is added and
 * the walk stops.
 *
 * NOTE(review): the walk always reads fchan->fdesc, never @vdesc — for
 * the !in_progress (still-pending descriptor) case this looks like it
 * should walk @vdesc instead, and it would dereference a NULL
 * fchan->fdesc if no transfer is active; confirm against the callers.
 */
static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
				   struct virt_dma_desc *vdesc,
				   bool in_progress)
{
	struct st_fdma_desc *fdesc = fchan->fdesc;
	size_t residue = 0;
	dma_addr_t cur_addr = 0;
	int i;

	if (in_progress) {
		cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
		cur_addr &= FDMA_CH_CMD_DATA_MASK;
	}

	for (i = fchan->fdesc->n_nodes - 1 ; i >= 0; i--) {
		if (cur_addr == fdesc->node[i].pdesc) {
			residue += fnode_read(fchan, FDMA_CNTN_OFST);
			break;
		}
		residue += fdesc->node[i].desc->nbytes;
	}

	return residue;
}
579
/*
 * dmaengine tx_status callback: report cookie completion state and,
 * when a txstate is supplied, fill in the residue — live from hardware
 * if the cookie is the currently running descriptor, otherwise the
 * full size of the still-pending descriptor.
 */
static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	vd = vchan_find_desc(&fchan->vchan, cookie);
	if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
		txstate->residue = st_fdma_desc_residue(fchan, vd, true);
	else if (vd)
		txstate->residue = st_fdma_desc_residue(fchan, vd, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return ret;
}
606
/*
 * dmaengine issue_pending callback: move submitted descriptors to the
 * issued list and, if the channel is idle, start the first one.
 */
static void st_fdma_issue_pending(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fchan->vchan.lock, flags);

	if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
		st_fdma_xfer_desc(fchan);

	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
}
619
/*
 * dmaengine pause callback: issue the PAUSE command for this channel,
 * but only when a transfer is actually in flight.
 */
static int st_fdma_pause(struct dma_chan *chan)
{
	unsigned long flags;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc)
		fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}
636
/*
 * dmaengine resume callback: rewrite CH_CMD with only its data field
 * (current node address), clearing the status bits, which lets the
 * paused channel continue.
 */
static int st_fdma_resume(struct dma_chan *chan)
{
	unsigned long flags;
	unsigned long val;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;

	dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc) {
		val = fchan_read(fchan, FDMA_CH_CMD_OFST);
		val &= FDMA_CH_CMD_DATA_MASK;
		fchan_write(fchan, val, FDMA_CH_CMD_OFST);
	}
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}
656
/*
 * dmaengine terminate_all callback: pause the channel, forget the
 * active descriptor and free every queued descriptor. The free happens
 * outside the lock, as vchan_dma_desc_free_list() requires.
 */
static int st_fdma_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	LIST_HEAD(head);
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	fchan->fdesc = NULL;
	vchan_get_all_descriptors(&fchan->vchan, &head);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fchan->vchan, &head);

	return 0;
}
676
677static int st_fdma_slave_config(struct dma_chan *chan,
678 struct dma_slave_config *slave_cfg)
679{
680 struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
681
682 memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
683 return 0;
684}
685
/*
 * Per-compatible driver data: SoC name plus FDMA instance id, combined
 * by st_fdma_parse_dt() into the firmware file name to load.
 */
static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
	.name = "STiH407",
	.id = 0,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
	.name = "STiH407",
	.id = 1,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
	.name = "STiH407",
	.id = 2,
};

/* OF match table: .data selects which firmware image is loaded */
static const struct of_device_id st_fdma_match[] = {
	{ .compatible = "st,stih407-fdma-mpe31-11"
	  , .data = &fdma_mpe31_stih407_11 },
	{ .compatible = "st,stih407-fdma-mpe31-12"
	  , .data = &fdma_mpe31_stih407_12 },
	{ .compatible = "st,stih407-fdma-mpe31-13"
	  , .data = &fdma_mpe31_stih407_13 },
	{},
};
MODULE_DEVICE_TABLE(of, st_fdma_match);
711
/*
 * Build the firmware file name ("fdma_<soc>_<id>.elf") from the match
 * data and read the channel count from the "dma-channels" DT property.
 *
 * Returns 0 on success or the of_property_read_u32() error.
 */
static int st_fdma_parse_dt(struct platform_device *pdev,
			const struct st_fdma_driverdata *drvdata,
			struct st_fdma_dev *fdev)
{
	snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
		drvdata->name, drvdata->id);

	return of_property_read_u32(pdev->dev.of_node, "dma-channels",
				    &fdev->nr_channels);
}
/* Bus widths advertised to dmaengine clients (1/2/3/4-byte accesses). */
#define FDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
726
/*
 * Detach every channel from the dmaengine device list and kill its
 * virt-dma tasklet. Used on the probe error path.
 */
static void st_fdma_free(struct st_fdma_dev *fdev)
{
	struct st_fdma_chan *fchan;
	int i;

	for (i = 0; i < fdev->nr_channels; i++) {
		fchan = &fdev->chans[i];
		list_del(&fchan->vchan.chan.device_node);
		tasklet_kill(&fchan->vchan.task);
	}
}
738
739static int st_fdma_probe(struct platform_device *pdev)
740{
741 struct st_fdma_dev *fdev;
742 const struct of_device_id *match;
743 struct device_node *np = pdev->dev.of_node;
744 const struct st_fdma_driverdata *drvdata;
745 int ret, i;
746
747 match = of_match_device((st_fdma_match), &pdev->dev);
748 if (!match || !match->data) {
749 dev_err(&pdev->dev, "No device match found\n");
750 return -ENODEV;
751 }
752
753 drvdata = match->data;
754
755 fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
756 if (!fdev)
757 return -ENOMEM;
758
759 ret = st_fdma_parse_dt(pdev, drvdata, fdev);
760 if (ret) {
761 dev_err(&pdev->dev, "unable to find platform data\n");
762 goto err;
763 }
764
765 fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
766 sizeof(struct st_fdma_chan), GFP_KERNEL);
767 if (!fdev->chans)
768 return -ENOMEM;
769
770 fdev->dev = &pdev->dev;
771 fdev->drvdata = drvdata;
772 platform_set_drvdata(pdev, fdev);
773
774 fdev->irq = platform_get_irq(pdev, 0);
775 if (fdev->irq < 0)
776 return -EINVAL;
777
778 ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
779 dev_name(&pdev->dev), fdev);
780 if (ret) {
781 dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
782 goto err;
783 }
784
785 fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
786 if (IS_ERR(fdev->slim_rproc)) {
787 ret = PTR_ERR(fdev->slim_rproc);
788 dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
789 goto err;
790 }
791
792 /* Initialise list of FDMA channels */
793 INIT_LIST_HEAD(&fdev->dma_device.channels);
794 for (i = 0; i < fdev->nr_channels; i++) {
795 struct st_fdma_chan *fchan = &fdev->chans[i];
796
797 fchan->fdev = fdev;
798 fchan->vchan.desc_free = st_fdma_free_desc;
799 vchan_init(&fchan->vchan, &fdev->dma_device);
800 }
801
802 /* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
803 fdev->dreq_mask = BIT(0) | BIT(31);
804
805 dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
806 dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
807 dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);
808
809 fdev->dma_device.dev = &pdev->dev;
810 fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
811 fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
812 fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
813 fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
814 fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
815 fdev->dma_device.device_tx_status = st_fdma_tx_status;
816 fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
817 fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
818 fdev->dma_device.device_config = st_fdma_slave_config;
819 fdev->dma_device.device_pause = st_fdma_pause;
820 fdev->dma_device.device_resume = st_fdma_resume;
821
822 fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
823 fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
824 fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
825 fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
826
827 ret = dmaenginem_async_device_register(&fdev->dma_device);
828 if (ret) {
829 dev_err(&pdev->dev,
830 "Failed to register DMA device (%d)\n", ret);
831 goto err_rproc;
832 }
833
834 ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
835 if (ret) {
836 dev_err(&pdev->dev,
837 "Failed to register controller (%d)\n", ret);
838 goto err_rproc;
839 }
840
841 dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
842
843 return 0;
844
845err_rproc:
846 st_fdma_free(fdev);
847 st_slim_rproc_put(fdev->slim_rproc);
848err:
849 return ret;
850}
851
/*
 * Remove: free the IRQ first so the handler cannot run against a
 * half-torn-down device, then release the SLIM core and unregister the
 * OF DMA controller.
 */
static int st_fdma_remove(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);

	devm_free_irq(&pdev->dev, fdev->irq, fdev);
	st_slim_rproc_put(fdev->slim_rproc);
	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}
862
/* Platform driver glue and module metadata. */
static struct platform_driver st_fdma_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = st_fdma_match,
	},
	.probe = st_fdma_probe,
	.remove = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform: " DRIVER_NAME);
1/*
2 * DMA driver for STMicroelectronics STi FDMA controller
3 *
4 * Copyright (C) 2014 STMicroelectronics
5 *
6 * Author: Ludovic Barre <Ludovic.barre@st.com>
7 * Peter Griffin <peter.griffin@linaro.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 */
14
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/of_device.h>
18#include <linux/of_dma.h>
19#include <linux/platform_device.h>
20#include <linux/interrupt.h>
21#include <linux/remoteproc.h>
22
23#include "st_fdma.h"
24
/* Convert a generic dma_chan into its enclosing st_fdma_chan. */
static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
{
	return container_of(c, struct st_fdma_chan, vchan.chan);
}
29
/* Convert a virt-dma descriptor into its enclosing st_fdma_desc. */
static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct st_fdma_desc, vdesc);
}
34
/*
 * Reserve the DMA request line configured in fchan->cfg.req_line.
 * Returns the reserved dreq line number, or -EINVAL if the line is
 * out of range or already in use.
 */
static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;
	u32 req_line_cfg = fchan->cfg.req_line;
	u32 dreq_line;
	int try = 0;

	/*
	 * dreq_mask is shared for n channels of fdma, so all accesses must be
	 * atomic. if the dreq_mask is changed between ffz and set_bit,
	 * we retry
	 */
	do {
		if (fdev->dreq_mask == ~0L) {
			dev_err(fdev->dev, "No req lines available\n");
			return -EINVAL;
		}

		/* a second pass means test_and_set_bit lost the race */
		if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
			dev_err(fdev->dev, "Invalid or used req line\n");
			return -EINVAL;
		} else {
			dreq_line = req_line_cfg;
		}

		try++;
	} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));

	dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
		dreq_line, fdev->dreq_mask);

	return dreq_line;
}
68
/* Release the dreq line previously reserved by st_fdma_dreq_get(). */
static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;

	dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
	clear_bit(fchan->dreq_line, &fdev->dreq_mask);
}
76
/*
 * Program the first node of the next queued descriptor into the channel
 * registers and kick the channel. Callers hold the vchan lock.
 */
static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
{
	struct virt_dma_desc *vdesc;
	unsigned long nbytes, ch_cmd, cmd;

	vdesc = vchan_next_desc(&fchan->vchan);
	if (!vdesc)
		return;		/* nothing queued */

	fchan->fdesc = to_st_fdma_desc(vdesc);
	nbytes = fchan->fdesc->node[0].desc->nbytes;
	cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
	/* CH_CMD carries the physical address of node 0 plus the START state */
	ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;

	/* start the channel for the descriptor */
	fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
	fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
	writel(cmd,
	       fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);

	dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
}
99
/*
 * Refresh fchan->status from the channel command/status register.
 * An error interrupt forces DMA_ERROR; otherwise only the PAUSED and
 * RUNNING hardware states are mapped — other states leave the previous
 * software status untouched.
 */
static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
				  unsigned long int_sta)
{
	unsigned long ch_sta, ch_err;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
	ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
	ch_sta &= FDMA_CH_CMD_STA_MASK;

	if (int_sta & FDMA_INT_STA_ERR) {
		dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
		fchan->status = DMA_ERROR;
		return;
	}

	switch (ch_sta) {
	case FDMA_CH_CMD_STA_PAUSED:
		fchan->status = DMA_PAUSED;
		break;

	case FDMA_CH_CMD_STA_RUNNING:
		fchan->status = DMA_IN_PROGRESS;
		break;
	}
}
127
/*
 * Shared interrupt handler: the status register holds two bits per
 * channel (done/error), so the loop shifts by two and advances fchan
 * each iteration.
 */
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
	struct st_fdma_dev *fdev = dev_id;
	irqreturn_t ret = IRQ_NONE;
	struct st_fdma_chan *fchan = &fdev->chans[0];
	unsigned long int_sta, clr;

	int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
	clr = int_sta;	/* remember all raised bits so we can ack them below */

	for (; int_sta != 0 ; int_sta >>= 2, fchan++) {
		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
			continue;

		spin_lock(&fchan->vchan.lock);
		st_fdma_ch_sta_update(fchan, int_sta);

		if (fchan->fdesc) {
			if (!fchan->fdesc->iscyclic) {
				/* one-shot transfer: retire the cookie */
				list_del(&fchan->fdesc->vdesc.node);
				vchan_cookie_complete(&fchan->fdesc->vdesc);
				fchan->fdesc = NULL;
				fchan->status = DMA_COMPLETE;
			} else {
				vchan_cyclic_callback(&fchan->fdesc->vdesc);
			}

			/* Start the next descriptor (if available) */
			if (!fchan->fdesc)
				st_fdma_xfer_desc(fchan);
		}

		spin_unlock(&fchan->vchan.lock);
		ret = IRQ_HANDLED;
	}

	fdma_write(fdev, clr, FDMA_INT_CLR_OFST);

	return ret;
}
168
/*
 * Translate a DT dma-spec into a channel. args[0] is the request line;
 * optional args[1] is the request control word, optional args[2] the
 * transfer type. The SLIM core is booted first and shut down again on
 * any failure.
 */
static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct st_fdma_dev *fdev = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct st_fdma_chan *fchan;
	int ret;

	if (dma_spec->args_count < 1)
		return ERR_PTR(-EINVAL);

	if (fdev->dma_device.dev->of_node != dma_spec->np)
		return ERR_PTR(-EINVAL);

	ret = rproc_boot(fdev->slim_rproc->rproc);
	/* -ENOENT means the firmware is not available yet: retry later */
	if (ret == -ENOENT)
		return ERR_PTR(-EPROBE_DEFER);
	else if (ret)
		return ERR_PTR(ret);

	chan = dma_get_any_slave_channel(&fdev->dma_device);
	if (!chan)
		goto err_chan;

	fchan = to_st_fdma_chan(chan);

	fchan->cfg.of_node = dma_spec->np;
	fchan->cfg.req_line = dma_spec->args[0];
	fchan->cfg.req_ctrl = 0;
	fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;

	if (dma_spec->args_count > 1)
		fchan->cfg.req_ctrl = dma_spec->args[1]
			& FDMA_REQ_CTRL_CFG_MASK;

	if (dma_spec->args_count > 2)
		fchan->cfg.type = dma_spec->args[2];

	if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
		fchan->dreq_line = 0;	/* free-running channels need no dreq */
	} else {
		fchan->dreq_line = st_fdma_dreq_get(fchan);
		if (IS_ERR_VALUE(fchan->dreq_line)) {
			chan = ERR_PTR(fchan->dreq_line);
			goto err_chan;
		}
	}

	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
		fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);

	return chan;

err_chan:
	/* balance the rproc_boot() above */
	rproc_shutdown(fdev->slim_rproc->rproc);
	return chan;

}
227
/*
 * virt-dma desc_free callback: return every hardware node to the
 * channel's DMA pool, then free the software descriptor itself.
 */
static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = to_st_fdma_desc(vdesc);
	for (i = 0; i < fdesc->n_nodes; i++)
		dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
}
239
240static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
241 int sg_len)
242{
243 struct st_fdma_desc *fdesc;
244 int i;
245
246 fdesc = kzalloc(sizeof(*fdesc) +
247 sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
248 if (!fdesc)
249 return NULL;
250
251 fdesc->fchan = fchan;
252 fdesc->n_nodes = sg_len;
253 for (i = 0; i < sg_len; i++) {
254 fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
255 GFP_NOWAIT, &fdesc->node[i].pdesc);
256 if (!fdesc->node[i].desc)
257 goto err;
258 }
259 return fdesc;
260
261err:
262 while (--i >= 0)
263 dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
264 fdesc->node[i].pdesc);
265 kfree(fdesc);
266 return NULL;
267}
268
/*
 * dmaengine alloc_chan_resources callback: create the per-channel DMA
 * pool used for hardware node allocation. Returns 0 or -ENOMEM.
 */
static int st_fdma_alloc_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	/* Create the dma pool for descriptor allocation */
	fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
					    fchan->fdev->dev,
					    sizeof(struct st_fdma_hw_node),
					    __alignof__(struct st_fdma_hw_node),
					    0);

	if (!fchan->node_pool) {
		dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
		return -ENOMEM;
	}

	dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
		fchan->vchan.chan.chan_id, fchan->cfg.type);

	return 0;
}
290
291static void st_fdma_free_chan_res(struct dma_chan *chan)
292{
293 struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
294 struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
295 unsigned long flags;
296
297 LIST_HEAD(head);
298
299 dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
300 __func__, fchan->vchan.chan.chan_id);
301
302 if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
303 st_fdma_dreq_put(fchan);
304
305 spin_lock_irqsave(&fchan->vchan.lock, flags);
306 fchan->fdesc = NULL;
307 spin_unlock_irqrestore(&fchan->vchan.lock, flags);
308
309 dma_pool_destroy(fchan->node_pool);
310 fchan->node_pool = NULL;
311 memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));
312
313 rproc_shutdown(rproc);
314}
315
316static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
317 struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
318 size_t len, unsigned long flags)
319{
320 struct st_fdma_chan *fchan;
321 struct st_fdma_desc *fdesc;
322 struct st_fdma_hw_node *hw_node;
323
324 if (!len)
325 return NULL;
326
327 fchan = to_st_fdma_chan(chan);
328
329 /* We only require a single descriptor */
330 fdesc = st_fdma_alloc_desc(fchan, 1);
331 if (!fdesc) {
332 dev_err(fchan->fdev->dev, "no memory for desc\n");
333 return NULL;
334 }
335
336 hw_node = fdesc->node[0].desc;
337 hw_node->next = 0;
338 hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
339 hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
340 hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
341 hw_node->control |= FDMA_NODE_CTRL_INT_EON;
342 hw_node->nbytes = len;
343 hw_node->saddr = src;
344 hw_node->daddr = dst;
345 hw_node->generic.length = len;
346 hw_node->generic.sstride = 0;
347 hw_node->generic.dstride = 0;
348
349 return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
350}
351
/*
 * config_reqctrl() - program the DMA request control register for a
 * slave transfer in @direction.
 *
 * Pulls maxburst, bus width and device address from the slave config
 * captured by st_fdma_slave_config(), encodes them into
 * fchan->cfg.req_ctrl and writes the result to the channel's dreq
 * register. The device address and direction are cached in fchan->cfg
 * for later use by fill_hw_node().
 *
 * Returns 0 on success, -EINVAL on an unsupported direction or bus
 * width.
 */
static int config_reqctrl(struct st_fdma_chan *fchan,
			  enum dma_transfer_direction direction)
{
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	switch (direction) {

	case DMA_DEV_TO_MEM:
		/* Device read: clear the write-not-read bit */
		fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.src_maxburst;
		width = fchan->scfg.src_addr_width;
		addr = fchan->scfg.src_addr;
		break;

	case DMA_MEM_TO_DEV:
		/* Device write: set the write-not-read bit */
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.dst_maxburst;
		width = fchan->scfg.dst_addr_width;
		addr = fchan->scfg.dst_addr;
		break;

	default:
		return -EINVAL;
	}

	/* Re-encode the load/store opcode for the configured bus width */
	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;

	switch (width) {

	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
		break;

	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
		break;

	default:
		return -EINVAL;
	}

	/*
	 * NUM_OPS holds the burst length minus one.
	 * NOTE(review): a zero maxburst would underflow here — presumably
	 * clients always set a non-zero burst; confirm.
	 */
	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
	fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst-1);
	dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);

	/* Cache for fill_hw_node() when building the node list */
	fchan->cfg.dev_addr = addr;
	fchan->cfg.dir = direction;

	dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
		ch_id, addr, fchan->cfg.req_ctrl);

	return 0;
}
416
417static void fill_hw_node(struct st_fdma_hw_node *hw_node,
418 struct st_fdma_chan *fchan,
419 enum dma_transfer_direction direction)
420{
421 if (direction == DMA_MEM_TO_DEV) {
422 hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
423 hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
424 hw_node->daddr = fchan->cfg.dev_addr;
425 } else {
426 hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
427 hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
428 hw_node->saddr = fchan->cfg.dev_addr;
429 }
430
431 hw_node->generic.sstride = 0;
432 hw_node->generic.dstride = 0;
433}
434
435static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
436 size_t len, enum dma_transfer_direction direction)
437{
438 struct st_fdma_chan *fchan;
439
440 if (!chan || !len)
441 return NULL;
442
443 fchan = to_st_fdma_chan(chan);
444
445 if (!is_slave_direction(direction)) {
446 dev_err(fchan->fdev->dev, "bad direction?\n");
447 return NULL;
448 }
449
450 return fchan;
451}
452
/*
 * st_fdma_prep_dma_cyclic() - prepare a cyclic (circular-buffer)
 * slave transfer.
 * @chan: DMA channel
 * @buf_addr: DMA address of the buffer
 * @len: total buffer length, must be a multiple of @period_len
 * @period_len: bytes per period; an interrupt fires after each period
 * @direction: DMA_MEM_TO_DEV or DMA_DEV_TO_MEM
 * @flags: descriptor flags
 *
 * Builds one hardware node per period and links the last node back to
 * the first, so the transfer loops until terminated.
 *
 * Returns the descriptor, or NULL on invalid arguments or allocation
 * failure.
 */
static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	int sg_len, i;

	fchan = st_fdma_prep_common(chan, len, direction);
	if (!fchan)
		return NULL;

	if (!period_len)
		return NULL;

	if (config_reqctrl(fchan, direction)) {
		dev_err(fchan->fdev->dev, "bad width or direction\n");
		return NULL;
	}

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0) {
		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
		return NULL;
	}

	sg_len = len / period_len;
	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = true;

	for (i = 0; i < sg_len; i++) {
		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;

		/* Modulo link closes the node list into a ring */
		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;

		hw_node->control =
			FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
		/* Interrupt at the end of every period */
		hw_node->control |= FDMA_NODE_CTRL_INT_EON;

		fill_hw_node(hw_node, fchan, direction);

		/* fill_hw_node() set the device side; set the memory side */
		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = buf_addr + (i * period_len);
		else
			hw_node->daddr = buf_addr + (i * period_len);

		hw_node->nbytes = period_len;
		hw_node->generic.length = period_len;
	}

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
511
/*
 * st_fdma_prep_slave_sg() - prepare a scatter-gather slave transfer.
 * @chan: DMA channel
 * @sgl: scatterlist of memory-side segments
 * @sg_len: number of entries in @sgl
 * @direction: DMA_MEM_TO_DEV or DMA_DEV_TO_MEM
 * @flags: descriptor flags
 * @context: unused
 *
 * Builds one hardware node per scatterlist entry; only the last node
 * raises an end-of-node interrupt.
 *
 * NOTE(review): unlike the cyclic variant, config_reqctrl() is not
 * called here — presumably the request control register is already
 * programmed; confirm.
 *
 * NOTE(review): the modulo link makes the last node point back to the
 * first even though this is not a cyclic transfer — presumably the
 * engine stops at the interrupting node; confirm.
 */
static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;
	struct scatterlist *sg;
	int i;

	fchan = st_fdma_prep_common(chan, sg_len, direction);
	if (!fchan)
		return NULL;

	if (!sgl)
		return NULL;

	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = false;

	for_each_sg(sgl, sg, sg_len, i) {
		hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
		hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);

		fill_hw_node(hw_node, fchan, direction);

		/* fill_hw_node() set the device side; set the memory side */
		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = sg_dma_address(sg);
		else
			hw_node->daddr = sg_dma_address(sg);

		hw_node->nbytes = sg_dma_len(sg);
		hw_node->generic.length = sg_dma_len(sg);
	}

	/* interrupt at end of last node */
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
560
/*
 * st_fdma_desc_residue() - bytes still to be transferred for the
 * channel's current descriptor.
 * @fchan: channel owning the transfer
 * @vdesc: descriptor found by the caller (currently unused, see note)
 * @in_progress: true when the descriptor is actively executing
 *
 * Walks the node list backwards summing nodes not yet reached; for the
 * node currently in flight (matched against the channel's command
 * register) the live hardware byte counter is read instead.
 *
 * NOTE(review): the residue is always computed from fchan->fdesc, not
 * from @vdesc — in the !in_progress path (descriptor still pending)
 * this appears to report the wrong descriptor's residue, and
 * fchan->fdesc could even be NULL. Confirm intended behavior.
 */
static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
				   struct virt_dma_desc *vdesc,
				   bool in_progress)
{
	struct st_fdma_desc *fdesc = fchan->fdesc;
	size_t residue = 0;
	dma_addr_t cur_addr = 0;
	int i;

	if (in_progress) {
		/* Address of the node being executed right now */
		cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
		cur_addr &= FDMA_CH_CMD_DATA_MASK;
	}

	for (i = fchan->fdesc->n_nodes - 1 ; i >= 0; i--) {
		if (cur_addr == fdesc->node[i].pdesc) {
			/* In-flight node: read the live remaining count */
			residue += fnode_read(fchan, FDMA_CNTN_OFST);
			break;
		}
		residue += fdesc->node[i].desc->nbytes;
	}

	return residue;
}
585
586static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
587 dma_cookie_t cookie,
588 struct dma_tx_state *txstate)
589{
590 struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
591 struct virt_dma_desc *vd;
592 enum dma_status ret;
593 unsigned long flags;
594
595 ret = dma_cookie_status(chan, cookie, txstate);
596 if (ret == DMA_COMPLETE || !txstate)
597 return ret;
598
599 spin_lock_irqsave(&fchan->vchan.lock, flags);
600 vd = vchan_find_desc(&fchan->vchan, cookie);
601 if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
602 txstate->residue = st_fdma_desc_residue(fchan, vd, true);
603 else if (vd)
604 txstate->residue = st_fdma_desc_residue(fchan, vd, false);
605 else
606 txstate->residue = 0;
607
608 spin_unlock_irqrestore(&fchan->vchan.lock, flags);
609
610 return ret;
611}
612
613static void st_fdma_issue_pending(struct dma_chan *chan)
614{
615 struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
616 unsigned long flags;
617
618 spin_lock_irqsave(&fchan->vchan.lock, flags);
619
620 if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
621 st_fdma_xfer_desc(fchan);
622
623 spin_unlock_irqrestore(&fchan->vchan.lock, flags);
624}
625
626static int st_fdma_pause(struct dma_chan *chan)
627{
628 unsigned long flags;
629 LIST_HEAD(head);
630 struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
631 int ch_id = fchan->vchan.chan.chan_id;
632 unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
633
634 dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);
635
636 spin_lock_irqsave(&fchan->vchan.lock, flags);
637 if (fchan->fdesc)
638 fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
639 spin_unlock_irqrestore(&fchan->vchan.lock, flags);
640
641 return 0;
642}
643
644static int st_fdma_resume(struct dma_chan *chan)
645{
646 unsigned long flags;
647 unsigned long val;
648 struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
649 int ch_id = fchan->vchan.chan.chan_id;
650
651 dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);
652
653 spin_lock_irqsave(&fchan->vchan.lock, flags);
654 if (fchan->fdesc) {
655 val = fchan_read(fchan, FDMA_CH_CMD_OFST);
656 val &= FDMA_CH_CMD_DATA_MASK;
657 fchan_write(fchan, val, FDMA_CH_CMD_OFST);
658 }
659 spin_unlock_irqrestore(&fchan->vchan.lock, flags);
660
661 return 0;
662}
663
664static int st_fdma_terminate_all(struct dma_chan *chan)
665{
666 unsigned long flags;
667 LIST_HEAD(head);
668 struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
669 int ch_id = fchan->vchan.chan.chan_id;
670 unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
671
672 dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);
673
674 spin_lock_irqsave(&fchan->vchan.lock, flags);
675 fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
676 fchan->fdesc = NULL;
677 vchan_get_all_descriptors(&fchan->vchan, &head);
678 spin_unlock_irqrestore(&fchan->vchan.lock, flags);
679 vchan_dma_desc_free_list(&fchan->vchan, &head);
680
681 return 0;
682}
683
684static int st_fdma_slave_config(struct dma_chan *chan,
685 struct dma_slave_config *slave_cfg)
686{
687 struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
688
689 memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
690 return 0;
691}
692
/*
 * Per-compatible driver data: SoC name and FDMA instance id, combined
 * by st_fdma_parse_dt() into the firmware file name
 * ("fdma_<name>_<id>.elf").
 */
static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
	.name = "STiH407",
	.id = 0,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
	.name = "STiH407",
	.id = 1,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
	.name = "STiH407",
	.id = 2,
};

/* One entry per FDMA instance on STiH407-family SoCs */
static const struct of_device_id st_fdma_match[] = {
	{ .compatible = "st,stih407-fdma-mpe31-11"
	  , .data = &fdma_mpe31_stih407_11 },
	{ .compatible = "st,stih407-fdma-mpe31-12"
	  , .data = &fdma_mpe31_stih407_12 },
	{ .compatible = "st,stih407-fdma-mpe31-13"
	  , .data = &fdma_mpe31_stih407_13 },
	{},
};
MODULE_DEVICE_TABLE(of, st_fdma_match);
718
719static int st_fdma_parse_dt(struct platform_device *pdev,
720 const struct st_fdma_driverdata *drvdata,
721 struct st_fdma_dev *fdev)
722{
723 snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
724 drvdata->name, drvdata->id);
725
726 return of_property_read_u32(pdev->dev.of_node, "dma-channels",
727 &fdev->nr_channels);
728}
/*
 * Slave bus widths advertised to the dmaengine core.
 * NOTE(review): config_reqctrl() also handles 8-byte accesses
 * (FDMA_REQ_CTRL_OPCODE_LD_ST8) but DMA_SLAVE_BUSWIDTH_8_BYTES is not
 * advertised here — confirm whether that is intentional.
 */
#define FDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
733
734static void st_fdma_free(struct st_fdma_dev *fdev)
735{
736 struct st_fdma_chan *fchan;
737 int i;
738
739 for (i = 0; i < fdev->nr_channels; i++) {
740 fchan = &fdev->chans[i];
741 list_del(&fchan->vchan.chan.device_node);
742 tasklet_kill(&fchan->vchan.task);
743 }
744}
745
746static int st_fdma_probe(struct platform_device *pdev)
747{
748 struct st_fdma_dev *fdev;
749 const struct of_device_id *match;
750 struct device_node *np = pdev->dev.of_node;
751 const struct st_fdma_driverdata *drvdata;
752 int ret, i;
753
754 match = of_match_device((st_fdma_match), &pdev->dev);
755 if (!match || !match->data) {
756 dev_err(&pdev->dev, "No device match found\n");
757 return -ENODEV;
758 }
759
760 drvdata = match->data;
761
762 fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
763 if (!fdev)
764 return -ENOMEM;
765
766 ret = st_fdma_parse_dt(pdev, drvdata, fdev);
767 if (ret) {
768 dev_err(&pdev->dev, "unable to find platform data\n");
769 goto err;
770 }
771
772 fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
773 sizeof(struct st_fdma_chan), GFP_KERNEL);
774 if (!fdev->chans)
775 return -ENOMEM;
776
777 fdev->dev = &pdev->dev;
778 fdev->drvdata = drvdata;
779 platform_set_drvdata(pdev, fdev);
780
781 fdev->irq = platform_get_irq(pdev, 0);
782 if (fdev->irq < 0) {
783 dev_err(&pdev->dev, "Failed to get irq resource\n");
784 return -EINVAL;
785 }
786
787 ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
788 dev_name(&pdev->dev), fdev);
789 if (ret) {
790 dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
791 goto err;
792 }
793
794 fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
795 if (IS_ERR(fdev->slim_rproc)) {
796 ret = PTR_ERR(fdev->slim_rproc);
797 dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
798 goto err;
799 }
800
801 /* Initialise list of FDMA channels */
802 INIT_LIST_HEAD(&fdev->dma_device.channels);
803 for (i = 0; i < fdev->nr_channels; i++) {
804 struct st_fdma_chan *fchan = &fdev->chans[i];
805
806 fchan->fdev = fdev;
807 fchan->vchan.desc_free = st_fdma_free_desc;
808 vchan_init(&fchan->vchan, &fdev->dma_device);
809 }
810
811 /* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
812 fdev->dreq_mask = BIT(0) | BIT(31);
813
814 dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
815 dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
816 dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);
817
818 fdev->dma_device.dev = &pdev->dev;
819 fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
820 fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
821 fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
822 fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
823 fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
824 fdev->dma_device.device_tx_status = st_fdma_tx_status;
825 fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
826 fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
827 fdev->dma_device.device_config = st_fdma_slave_config;
828 fdev->dma_device.device_pause = st_fdma_pause;
829 fdev->dma_device.device_resume = st_fdma_resume;
830
831 fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
832 fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
833 fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
834 fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
835
836 ret = dma_async_device_register(&fdev->dma_device);
837 if (ret) {
838 dev_err(&pdev->dev,
839 "Failed to register DMA device (%d)\n", ret);
840 goto err_rproc;
841 }
842
843 ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
844 if (ret) {
845 dev_err(&pdev->dev,
846 "Failed to register controller (%d)\n", ret);
847 goto err_dma_dev;
848 }
849
850 dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
851
852 return 0;
853
854err_dma_dev:
855 dma_async_device_unregister(&fdev->dma_device);
856err_rproc:
857 st_fdma_free(fdev);
858 st_slim_rproc_put(fdev->slim_rproc);
859err:
860 return ret;
861}
862
/*
 * st_fdma_remove() - tear down the FDMA engine on device removal.
 *
 * Frees the interrupt first so no handler runs during teardown,
 * releases the SLIM rproc, then unregisters the OF DMA controller and
 * the dmaengine device.
 *
 * NOTE(review): the dmaengine device is unregistered last, after the
 * irq and rproc are gone — confirm no client can still hold a channel
 * at that point.
 */
static int st_fdma_remove(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);

	devm_free_irq(&pdev->dev, fdev->irq, fdev);
	st_slim_rproc_put(fdev->slim_rproc);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&fdev->dma_device);

	return 0;
}
874
875static struct platform_driver st_fdma_platform_driver = {
876 .driver = {
877 .name = DRIVER_NAME,
878 .of_match_table = st_fdma_match,
879 },
880 .probe = st_fdma_probe,
881 .remove = st_fdma_remove,
882};
883module_platform_driver(st_fdma_platform_driver);
884
885MODULE_LICENSE("GPL v2");
886MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
887MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
888MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
889MODULE_ALIAS("platform: " DRIVER_NAME);