1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * DMA driver for Xilinx DMA/Bridge Subsystem
4 *
5 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
6 * Copyright (C) 2022, Advanced Micro Devices, Inc.
7 */
8
9/*
10 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
11 * between Host memory and the DMA subsystem. It does this by operating on
12 * 'descriptors' that contain information about the source, destination and
13 * amount of data to transfer. These direct memory transfers can be both in
14 * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be
15 * configured to have a single AXI4 Master interface shared by all channels
16 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
17 * specified on a per-channel basis in descriptor linked lists, which the DMA
18 * fetches from host memory and processes. Events such as descriptor completion
19 * and errors are signaled using interrupts. The core also provides up to 16
20 * user interrupt wires that generate interrupts to the host.
21 */
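
/*
 * Illustrative sketch (not part of this driver): a dmaengine client that has
 * been handed an H2C channel of this controller would typically drive it as
 * below. The channel name "h2c", the endpoint address ep_addr, the sgl/sg_len
 * mapping and the completion callback done_fn are hypothetical and depend on
 * the platform data installed by the PCIe parent driver.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = ep_addr,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "h2c");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	tx->callback = done_fn;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */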
22
23#include <linux/mod_devicetable.h>
24#include <linux/bitfield.h>
25#include <linux/dmapool.h>
26#include <linux/regmap.h>
27#include <linux/dmaengine.h>
28#include <linux/dma/amd_xdma.h>
29#include <linux/platform_device.h>
30#include <linux/platform_data/amd_xdma.h>
31#include <linux/dma-mapping.h>
32#include <linux/pci.h>
33#include "../virt-dma.h"
34#include "xdma-regs.h"
35
36/* mmio regmap config for all XDMA registers */
37static const struct regmap_config xdma_regmap_config = {
38 .reg_bits = 32,
39 .val_bits = 32,
40 .reg_stride = 4,
41 .max_register = XDMA_REG_SPACE_LEN,
42};
43
44/**
45 * struct xdma_desc_block - Descriptor block
46 * @virt_addr: Virtual address of block start
47 * @dma_addr: DMA address of block start
48 */
49struct xdma_desc_block {
50 void *virt_addr;
51 dma_addr_t dma_addr;
52};
53
54/**
55 * struct xdma_chan - Driver specific DMA channel structure
56 * @vchan: Virtual channel
57 * @xdev_hdl: Pointer to DMA device structure
58 * @base: Offset of channel registers
59 * @desc_pool: Descriptor pool
60 * @busy: Busy flag of the channel
61 * @dir: Transferring direction of the channel
62 * @cfg: Transferring config of the channel
63 * @irq: IRQ assigned to the channel
64 */
65struct xdma_chan {
66 struct virt_dma_chan vchan;
67 void *xdev_hdl;
68 u32 base;
69 struct dma_pool *desc_pool;
70 bool busy;
71 enum dma_transfer_direction dir;
72 struct dma_slave_config cfg;
73 u32 irq;
74};
75
76/**
77 * struct xdma_desc - DMA desc structure
78 * @vdesc: Virtual DMA descriptor
79 * @chan: DMA channel pointer
80 * @dir: Transferring direction of the request
81 * @desc_blocks: Hardware descriptor blocks
82 * @dblk_num: Number of hardware descriptor blocks
83 * @desc_num: Number of hardware descriptors
84 * @completed_desc_num: Completed hardware descriptors
85 * @cyclic: Cyclic transfer vs. scatter-gather
86 * @interleaved_dma: Interleaved DMA transfer
87 * @periods: Number of periods in the cyclic transfer
88 * @period_size: Size of a period in bytes in cyclic transfers
89 * @frames_left: Number of frames left in interleaved DMA transfer
90 * @error: tx error flag
91 */
92struct xdma_desc {
93 struct virt_dma_desc vdesc;
94 struct xdma_chan *chan;
95 enum dma_transfer_direction dir;
96 struct xdma_desc_block *desc_blocks;
97 u32 dblk_num;
98 u32 desc_num;
99 u32 completed_desc_num;
100 bool cyclic;
101 bool interleaved_dma;
102 u32 periods;
103 u32 period_size;
104 u32 frames_left;
105 bool error;
106};
107
108#define XDMA_DEV_STATUS_REG_DMA BIT(0)
109#define XDMA_DEV_STATUS_INIT_MSIX BIT(1)
110
111/**
112 * struct xdma_device - DMA device structure
113 * @pdev: Platform device pointer
114 * @dma_dev: DMA device structure
115 * @rmap: MMIO regmap for DMA registers
116 * @h2c_chans: Host to Card channels
117 * @c2h_chans: Card to Host channels
118 * @h2c_chan_num: Number of H2C channels
119 * @c2h_chan_num: Number of C2H channels
120 * @irq_start: Start IRQ assigned to device
121 * @irq_num: Number of IRQ assigned to device
122 * @status: Initialization status
123 */
124struct xdma_device {
125 struct platform_device *pdev;
126 struct dma_device dma_dev;
127 struct regmap *rmap;
128 struct xdma_chan *h2c_chans;
129 struct xdma_chan *c2h_chans;
130 u32 h2c_chan_num;
131 u32 c2h_chan_num;
132 u32 irq_start;
133 u32 irq_num;
134 u32 status;
135};
136
137#define xdma_err(xdev, fmt, args...) \
138 dev_err(&(xdev)->pdev->dev, fmt, ##args)
139#define XDMA_CHAN_NUM(_xd) ({ \
140 typeof(_xd) (xd) = (_xd); \
141 ((xd)->h2c_chan_num + (xd)->c2h_chan_num); })
142
143/* Get the last desc in a desc block */
144static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
145{
146 return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
147}
148
149/**
150 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
151 * @sw_desc: Tx descriptor pointer
152 */
153static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
154{
155 struct xdma_desc_block *block;
156 u32 last_blk_desc, desc_control;
157 struct xdma_hw_desc *desc;
158 int i;
159
160 desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
161 for (i = 1; i < sw_desc->dblk_num; i++) {
162 block = &sw_desc->desc_blocks[i - 1];
163 desc = xdma_blk_last_desc(block);
164
165 if (!(i & XDMA_DESC_BLOCK_MASK)) {
166 desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
167 continue;
168 }
169 desc->control = cpu_to_le32(desc_control);
170 desc->next_desc = cpu_to_le64(block[1].dma_addr);
171 }
172
173 /* update the last block */
174 last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
175 if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
176 block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
177 desc = xdma_blk_last_desc(block);
178 desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
179 desc->control = cpu_to_le32(desc_control);
180 }
181
182 block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
183 desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
184 desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
185}
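
/*
 * Worked example, assuming the usual values XDMA_DESC_ADJACENT = 32 and
 * XDMA_DESC_BLOCK_NUM = 8 from xdma-regs.h: for desc_num = 70 hardware
 * descriptors, dblk_num = 3. The loop above chains block 0 to block 1 and
 * block 1 to block 2 through the next_desc field of each block's last
 * descriptor. last_blk_desc = (70 - 1) & 31 = 5, so the last descriptor of
 * block 1 is reprogrammed with an adjacent count of 5 + 1 = 6, and
 * descriptor 5 of block 2 is marked XDMA_DESC_CONTROL_LAST. Only at every
 * XDMA_DESC_BLOCK_NUM-th block boundary is a descriptor marked LAST
 * mid-chain, which lets the ISR restart the engine on the next group of
 * blocks.
 */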
186
187/**
188 * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
189 * @sw_desc: Tx descriptor pointer
190 */
191static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
192{
193 struct xdma_desc_block *block;
194 struct xdma_hw_desc *desc;
195 int i;
196
197 block = sw_desc->desc_blocks;
198 for (i = 0; i < sw_desc->desc_num - 1; i++) {
199 desc = block->virt_addr + i * XDMA_DESC_SIZE;
200 desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
201 }
202 desc = block->virt_addr + i * XDMA_DESC_SIZE;
203 desc->next_desc = cpu_to_le64(block->dma_addr);
204}
205
206static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
207{
208 return container_of(chan, struct xdma_chan, vchan.chan);
209}
210
211static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
212{
213 return container_of(vdesc, struct xdma_desc, vdesc);
214}
215
216/**
217 * xdma_channel_init - Initialize DMA channel registers
218 * @chan: DMA channel pointer
219 */
220static int xdma_channel_init(struct xdma_chan *chan)
221{
222 struct xdma_device *xdev = chan->xdev_hdl;
223 int ret;
224
225 ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
226 CHAN_CTRL_NON_INCR_ADDR);
227 if (ret)
228 return ret;
229
230 ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
231 CHAN_IM_ALL);
232 if (ret)
233 return ret;
234
235 return 0;
236}
237
238/**
239 * xdma_free_desc - Free descriptor
240 * @vdesc: Virtual DMA descriptor
241 */
242static void xdma_free_desc(struct virt_dma_desc *vdesc)
243{
244 struct xdma_desc *sw_desc;
245 int i;
246
247 sw_desc = to_xdma_desc(vdesc);
248 for (i = 0; i < sw_desc->dblk_num; i++) {
249 if (!sw_desc->desc_blocks[i].virt_addr)
250 break;
251 dma_pool_free(sw_desc->chan->desc_pool,
252 sw_desc->desc_blocks[i].virt_addr,
253 sw_desc->desc_blocks[i].dma_addr);
254 }
255 kfree(sw_desc->desc_blocks);
256 kfree(sw_desc);
257}
258
259/**
260 * xdma_alloc_desc - Allocate descriptor
261 * @chan: DMA channel pointer
262 * @desc_num: Number of hardware descriptors
263 * @cyclic: Whether this is a cyclic transfer
264 */
265static struct xdma_desc *
266xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
267{
268 struct xdma_desc *sw_desc;
269 struct xdma_hw_desc *desc;
270 dma_addr_t dma_addr;
271 u32 dblk_num;
272 u32 control;
273 void *addr;
274 int i, j;
275
276 sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
277 if (!sw_desc)
278 return NULL;
279
280 sw_desc->chan = chan;
281 sw_desc->desc_num = desc_num;
282 sw_desc->cyclic = cyclic;
283 sw_desc->error = false;
284 dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
285 sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
286 GFP_NOWAIT);
287 if (!sw_desc->desc_blocks)
288 goto failed;
289
290 if (cyclic)
291 control = XDMA_DESC_CONTROL_CYCLIC;
292 else
293 control = XDMA_DESC_CONTROL(1, 0);
294
295 sw_desc->dblk_num = dblk_num;
296 for (i = 0; i < sw_desc->dblk_num; i++) {
297 addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
298 if (!addr)
299 goto failed;
300
301 sw_desc->desc_blocks[i].virt_addr = addr;
302 sw_desc->desc_blocks[i].dma_addr = dma_addr;
303 for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
304 desc[j].control = cpu_to_le32(control);
305 }
306
307 if (cyclic)
308 xdma_link_cyclic_desc_blocks(sw_desc);
309 else
310 xdma_link_sg_desc_blocks(sw_desc);
311
312 return sw_desc;
313
314failed:
315 xdma_free_desc(&sw_desc->vdesc);
316 return NULL;
317}
318
319/**
320 * xdma_xfer_start - Start DMA transfer
321 * @xchan: DMA channel pointer
322 */
323static int xdma_xfer_start(struct xdma_chan *xchan)
324{
325 struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
326 struct xdma_device *xdev = xchan->xdev_hdl;
327 struct xdma_desc_block *block;
328 u32 val, completed_blocks;
329 struct xdma_desc *desc;
330 int ret;
331
332 /*
333 * Return -EINVAL if there is no submitted descriptor or the channel is busy.
334 * The vchan lock must be held by the caller.
335 */
336 if (!vd || xchan->busy)
337 return -EINVAL;
338
339 /* clear run stop bit to get ready for transfer */
340 ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
341 CHAN_CTRL_RUN_STOP);
342 if (ret)
343 return ret;
344
345 desc = to_xdma_desc(vd);
346 if (desc->dir != xchan->dir) {
347 xdma_err(xdev, "incorrect request direction");
348 return -EINVAL;
349 }
350
351 /* set DMA engine to the first descriptor block */
352 completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
353 block = &desc->desc_blocks[completed_blocks];
354 val = lower_32_bits(block->dma_addr);
355 ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
356 if (ret)
357 return ret;
358
359 val = upper_32_bits(block->dma_addr);
360 ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
361 if (ret)
362 return ret;
363
364 if (completed_blocks + 1 == desc->dblk_num)
365 val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
366 else
367 val = XDMA_DESC_ADJACENT - 1;
368 ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
369 if (ret)
370 return ret;
371
372 /* kick off DMA transfer */
373 ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
374 CHAN_CTRL_START);
375 if (ret)
376 return ret;
377
378 xchan->busy = true;
379
380 return 0;
381}
382
383/**
384 * xdma_xfer_stop - Stop DMA transfer
385 * @xchan: DMA channel pointer
386 */
387static int xdma_xfer_stop(struct xdma_chan *xchan)
388{
389 int ret;
390 u32 val;
391 struct xdma_device *xdev = xchan->xdev_hdl;
392
393 /* clear run stop bit to prevent any further auto-triggering */
394 ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
395 CHAN_CTRL_RUN_STOP);
396 if (ret)
397 return ret;
398
399 /* Clear the channel status register */
400 ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
401 if (ret)
402 return ret;
403
404 return 0;
405}
406
407/**
408 * xdma_alloc_channels - Detect and allocate DMA channels
409 * @xdev: DMA device pointer
410 * @dir: Channel direction
411 */
412static int xdma_alloc_channels(struct xdma_device *xdev,
413 enum dma_transfer_direction dir)
414{
415 struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
416 struct xdma_chan **chans, *xchan;
417 u32 base, identifier, target;
418 u32 *chan_num;
419 int i, j, ret;
420
421 if (dir == DMA_MEM_TO_DEV) {
422 base = XDMA_CHAN_H2C_OFFSET;
423 target = XDMA_CHAN_H2C_TARGET;
424 chans = &xdev->h2c_chans;
425 chan_num = &xdev->h2c_chan_num;
426 } else if (dir == DMA_DEV_TO_MEM) {
427 base = XDMA_CHAN_C2H_OFFSET;
428 target = XDMA_CHAN_C2H_TARGET;
429 chans = &xdev->c2h_chans;
430 chan_num = &xdev->c2h_chan_num;
431 } else {
432 xdma_err(xdev, "invalid direction specified");
433 return -EINVAL;
434 }
435
436 /* detect number of available DMA channels */
437 for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
438 ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
439 &identifier);
440 if (ret)
441 return ret;
442
443 /* check if this is an available DMA channel */
444 if (XDMA_CHAN_CHECK_TARGET(identifier, target))
445 (*chan_num)++;
446 }
447
448 if (!*chan_num) {
449 xdma_err(xdev, "did not detect any channels");
450 return -EINVAL;
451 }
452
453 *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
454 GFP_KERNEL);
455 if (!*chans)
456 return -ENOMEM;
457
458 for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
459 ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
460 &identifier);
461 if (ret)
462 return ret;
463
464 if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
465 continue;
466
467 if (j == *chan_num) {
468 xdma_err(xdev, "invalid channel number");
469 return -EIO;
470 }
471
472 /* init channel structure and hardware */
473 xchan = &(*chans)[j];
474 xchan->xdev_hdl = xdev;
475 xchan->base = base + i * XDMA_CHAN_STRIDE;
476 xchan->dir = dir;
477
478 ret = xdma_channel_init(xchan);
479 if (ret)
480 return ret;
481 xchan->vchan.desc_free = xdma_free_desc;
482 vchan_init(&xchan->vchan, &xdev->dma_dev);
483
484 j++;
485 }
486
487 dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
488 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");
489
490 return 0;
491}
492
493/**
494 * xdma_issue_pending - Issue pending transactions
495 * @chan: DMA channel pointer
496 */
497static void xdma_issue_pending(struct dma_chan *chan)
498{
499 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
500 unsigned long flags;
501
502 spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
503 if (vchan_issue_pending(&xdma_chan->vchan))
504 xdma_xfer_start(xdma_chan);
505 spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
506}
507
508/**
509 * xdma_terminate_all - Terminate all transactions
510 * @chan: DMA channel pointer
511 */
512static int xdma_terminate_all(struct dma_chan *chan)
513{
514 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
515 struct virt_dma_desc *vd;
516 unsigned long flags;
517 LIST_HEAD(head);
518
519 xdma_xfer_stop(xdma_chan);
520
521 spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
522
523 xdma_chan->busy = false;
524 vd = vchan_next_desc(&xdma_chan->vchan);
525 if (vd) {
526 list_del(&vd->node);
527 dma_cookie_complete(&vd->tx);
528 vchan_terminate_vdesc(vd);
529 }
530 vchan_get_all_descriptors(&xdma_chan->vchan, &head);
531 list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);
532
533 spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
534
535 return 0;
536}
537
538/**
539 * xdma_synchronize - Synchronize terminated transactions
540 * @chan: DMA channel pointer
541 */
542static void xdma_synchronize(struct dma_chan *chan)
543{
544 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
545
546 vchan_synchronize(&xdma_chan->vchan);
547}
548
549/**
550 * xdma_fill_descs - Fill hardware descriptors for one contiguous memory chunk
551 * @sw_desc: Descriptor container
552 * @src_addr: First value for the ->src_addr field
553 * @dst_addr: First value for the ->dst_addr field
554 * @size: Size of the contiguous memory block; split over several descriptors when it exceeds XDMA_DESC_BLEN_MAX
555 * @filled_descs_num: Index of the first descriptor to fill in @sw_desc
556 */
557static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
558 u64 dst_addr, u32 size, u32 filled_descs_num)
559{
560 u32 left = size, len, desc_num = filled_descs_num;
561 struct xdma_desc_block *dblk;
562 struct xdma_hw_desc *desc;
563
564 dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
565 desc = dblk->virt_addr;
566 desc += desc_num & XDMA_DESC_ADJACENT_MASK;
567 do {
568 len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
569 /* set hardware descriptor */
570 desc->bytes = cpu_to_le32(len);
571 desc->src_addr = cpu_to_le64(src_addr);
572 desc->dst_addr = cpu_to_le64(dst_addr);
573 if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
574 desc = (++dblk)->virt_addr;
575 else
576 desc++;
577
578 src_addr += len;
579 dst_addr += len;
580 left -= len;
581 } while (left);
582
583 return desc_num - filled_descs_num;
584}
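
/*
 * Worked example: a contiguous chunk of 2.5 * XDMA_DESC_BLEN_MAX bytes
 * consumes three hardware descriptors here, two carrying XDMA_DESC_BLEN_MAX
 * bytes each and a third carrying the remainder, with src_addr/dst_addr
 * advanced by the descriptor length on every step. Crossing a 32-descriptor
 * boundary (XDMA_DESC_ADJACENT, assuming the usual value) simply continues
 * in the next pre-linked block, so callers only need to pass the running
 * descriptor index in filled_descs_num.
 */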
585
586/**
587 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
588 * @chan: DMA channel pointer
589 * @sgl: Transfer scatter gather list
590 * @sg_len: Length of scatter gather list
591 * @dir: Transfer direction
592 * @flags: transfer ack flags
593 * @context: APP words of the descriptor
594 */
595static struct dma_async_tx_descriptor *
596xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
597 unsigned int sg_len, enum dma_transfer_direction dir,
598 unsigned long flags, void *context)
599{
600 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
601 struct dma_async_tx_descriptor *tx_desc;
602 struct xdma_desc *sw_desc;
603 u32 desc_num = 0, i;
604 u64 addr, dev_addr, *src, *dst;
605 struct scatterlist *sg;
606
607 for_each_sg(sgl, sg, sg_len, i)
608 desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
609
610 sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
611 if (!sw_desc)
612 return NULL;
613 sw_desc->dir = dir;
614 sw_desc->cyclic = false;
615 sw_desc->interleaved_dma = false;
616
617 if (dir == DMA_MEM_TO_DEV) {
618 dev_addr = xdma_chan->cfg.dst_addr;
619 src = &addr;
620 dst = &dev_addr;
621 } else {
622 dev_addr = xdma_chan->cfg.src_addr;
623 src = &dev_addr;
624 dst = &addr;
625 }
626
627 desc_num = 0;
628 for_each_sg(sgl, sg, sg_len, i) {
629 addr = sg_dma_address(sg);
630 desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
631 dev_addr += sg_dma_len(sg);
632 }
633
634 tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
635 if (!tx_desc)
636 goto failed;
637
638 return tx_desc;
639
640failed:
641 xdma_free_desc(&sw_desc->vdesc);
642
643 return NULL;
644}
645
646/**
647 * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
648 * @chan: DMA channel pointer
649 * @address: Device DMA address to access
650 * @size: Total length to transfer
651 * @period_size: Period size to use for each transfer
652 * @dir: Transfer direction
653 * @flags: Transfer ack flags
654 */
655static struct dma_async_tx_descriptor *
656xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
657 size_t size, size_t period_size,
658 enum dma_transfer_direction dir,
659 unsigned long flags)
660{
661 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
662 struct xdma_device *xdev = xdma_chan->xdev_hdl;
663 unsigned int periods = size / period_size;
664 struct dma_async_tx_descriptor *tx_desc;
665 struct xdma_desc *sw_desc;
666 u64 addr, dev_addr, *src, *dst;
667 u32 desc_num;
668 unsigned int i;
669
670 /*
671 * Simplify the whole logic by preventing an abnormally high number of
672 * periods and an excessive period size.
673 */
674 if (period_size > XDMA_DESC_BLEN_MAX) {
675 xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
676 return NULL;
677 }
678
679 if (periods > XDMA_DESC_ADJACENT) {
680 xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
681 return NULL;
682 }
683
684 sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
685 if (!sw_desc)
686 return NULL;
687
688 sw_desc->periods = periods;
689 sw_desc->period_size = period_size;
690 sw_desc->dir = dir;
691 sw_desc->interleaved_dma = false;
692
693 addr = address;
694 if (dir == DMA_MEM_TO_DEV) {
695 dev_addr = xdma_chan->cfg.dst_addr;
696 src = &addr;
697 dst = &dev_addr;
698 } else {
699 dev_addr = xdma_chan->cfg.src_addr;
700 src = &dev_addr;
701 dst = &addr;
702 }
703
704 desc_num = 0;
705 for (i = 0; i < periods; i++) {
706 desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
707 addr += period_size;
708 }
709
710 tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
711 if (!tx_desc)
712 goto failed;
713
714 return tx_desc;
715
716failed:
717 xdma_free_desc(&sw_desc->vdesc);
718
719 return NULL;
720}
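
/*
 * Illustrative client sketch for the cyclic case (buf, PERIOD and
 * period_done are hypothetical). Given the checks above, a cyclic buffer may
 * hold at most XDMA_DESC_ADJACENT periods of at most XDMA_DESC_BLEN_MAX
 * bytes each.
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf, 4 * PERIOD, PERIOD,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	tx->callback = period_done;	(typically invoked as periods complete)
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */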
721
722/**
723 * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
724 * @chan: DMA channel
725 * @xt: DMA transfer template
726 * @flags: tx flags
727 */
728static struct dma_async_tx_descriptor *
729xdma_prep_interleaved_dma(struct dma_chan *chan,
730 struct dma_interleaved_template *xt,
731 unsigned long flags)
732{
733 int i;
734 u32 desc_num = 0, period_size = 0;
735 struct dma_async_tx_descriptor *tx_desc;
736 struct xdma_chan *xchan = to_xdma_chan(chan);
737 struct xdma_desc *sw_desc;
738 u64 src_addr, dst_addr;
739
740 for (i = 0; i < xt->frame_size; ++i)
741 desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
742
743 sw_desc = xdma_alloc_desc(xchan, desc_num, false);
744 if (!sw_desc)
745 return NULL;
746 sw_desc->dir = xt->dir;
747 sw_desc->interleaved_dma = true;
748 sw_desc->cyclic = flags & DMA_PREP_REPEAT;
749 sw_desc->frames_left = xt->numf;
750 sw_desc->periods = xt->numf;
751
752 desc_num = 0;
753 src_addr = xt->src_start;
754 dst_addr = xt->dst_start;
755 for (i = 0; i < xt->frame_size; ++i) {
756 desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
757 src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
758 xt->sgl[i].size : 0);
759 dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
760 xt->sgl[i].size : 0);
761 period_size += xt->sgl[i].size;
762 }
763 sw_desc->period_size = period_size;
764
765 tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
766 if (tx_desc)
767 return tx_desc;
768
769 xdma_free_desc(&sw_desc->vdesc);
770 return NULL;
771}
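
/*
 * Illustrative client sketch (src_dma, dev_addr and the sizes are
 * hypothetical). The prep routine above builds descriptors for the
 * frame_size chunks of one frame and tracks the numf frames in frames_left.
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = src_dma;
 *	xt->dst_start = dev_addr;
 *	xt->numf = 16;
 *	xt->frame_size = 1;
 *	xt->src_inc = true;
 *	xt->dst_inc = false;
 *	xt->sgl[0].size = SZ_4K;
 *	xt->sgl[0].icg = 0;
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */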
772
773/**
774 * xdma_device_config - Configure the DMA channel
775 * @chan: DMA channel
776 * @cfg: channel configuration
777 */
778static int xdma_device_config(struct dma_chan *chan,
779 struct dma_slave_config *cfg)
780{
781 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
782
783 memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));
784
785 return 0;
786}
787
788/**
789 * xdma_free_chan_resources - Free channel resources
790 * @chan: DMA channel
791 */
792static void xdma_free_chan_resources(struct dma_chan *chan)
793{
794 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
795
796 vchan_free_chan_resources(&xdma_chan->vchan);
797 dma_pool_destroy(xdma_chan->desc_pool);
798 xdma_chan->desc_pool = NULL;
799}
800
801/**
802 * xdma_alloc_chan_resources - Allocate channel resources
803 * @chan: DMA channel
804 */
805static int xdma_alloc_chan_resources(struct dma_chan *chan)
806{
807 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
808 struct xdma_device *xdev = xdma_chan->xdev_hdl;
809 struct device *dev = xdev->dma_dev.dev;
810
811 while (dev && !dev_is_pci(dev))
812 dev = dev->parent;
813 if (!dev) {
814 xdma_err(xdev, "unable to find pci device");
815 return -EINVAL;
816 }
817
818 xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
819 XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
820 if (!xdma_chan->desc_pool) {
821 xdma_err(xdev, "unable to allocate descriptor pool");
822 return -ENOMEM;
823 }
824
825 return 0;
826}
827
828static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
829 struct dma_tx_state *state)
830{
831 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
832 struct xdma_desc *desc = NULL;
833 struct virt_dma_desc *vd;
834 enum dma_status ret;
835 unsigned long flags;
836 unsigned int period_idx;
837 u32 residue = 0;
838
839 ret = dma_cookie_status(chan, cookie, state);
840 if (ret == DMA_COMPLETE)
841 return ret;
842
843 spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
844
845 vd = vchan_find_desc(&xdma_chan->vchan, cookie);
846 if (!vd)
847 goto out;
848
849 desc = to_xdma_desc(vd);
850 if (desc->error) {
851 ret = DMA_ERROR;
852 } else if (desc->cyclic) {
853 period_idx = desc->completed_desc_num % desc->periods;
854 residue = (desc->periods - period_idx) * desc->period_size;
855 dma_set_residue(state, residue);
856 }
857out:
858 spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
859
860 return ret;
861}
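
/*
 * Example of the cyclic residue math above: with periods = 4,
 * period_size = 4096 and completed_desc_num = 6, period_idx is 6 % 4 = 2 and
 * the residue reported to the client is (4 - 2) * 4096 = 8192 bytes.
 */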
862
863/**
864 * xdma_channel_isr - XDMA channel interrupt handler
865 * @irq: IRQ number
866 * @dev_id: Pointer to the DMA channel structure
867 */
868static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
869{
870 struct xdma_chan *xchan = dev_id;
871 u32 complete_desc_num = 0;
872 struct xdma_device *xdev = xchan->xdev_hdl;
873 struct virt_dma_desc *vd, *next_vd;
874 struct xdma_desc *desc;
875 int ret;
876 u32 st;
877 bool repeat_tx;
878
879 spin_lock(&xchan->vchan.lock);
880
881 /* get submitted request */
882 vd = vchan_next_desc(&xchan->vchan);
883 if (!vd)
884 goto out;
885
886 /* Clear-on-read the status register */
887 ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
888 if (ret)
889 goto out;
890
891 desc = to_xdma_desc(vd);
892
893 st &= XDMA_CHAN_STATUS_MASK;
894 if ((st & XDMA_CHAN_ERROR_MASK) ||
895 !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
896 desc->error = true;
897 xdma_err(xdev, "channel error, status register value: 0x%x", st);
898 goto out;
899 }
900
901 ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
902 &complete_desc_num);
903 if (ret)
904 goto out;
905
906 if (desc->interleaved_dma) {
907 xchan->busy = false;
908 desc->completed_desc_num += complete_desc_num;
909 if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
910 xdma_xfer_start(xchan);
911 goto out;
912 }
913
914 /* last desc of any frame */
915 desc->frames_left--;
916 if (desc->frames_left)
917 goto out;
918
919 /* last desc of the last frame */
920 repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
921 next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
922 if (next_vd)
923 repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
924 if (repeat_tx) {
925 desc->frames_left = desc->periods;
926 desc->completed_desc_num = 0;
927 vchan_cyclic_callback(vd);
928 } else {
929 list_del(&vd->node);
930 vchan_cookie_complete(vd);
931 }
932 /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
933 xdma_xfer_start(xchan);
934 } else if (!desc->cyclic) {
935 xchan->busy = false;
936 desc->completed_desc_num += complete_desc_num;
937
938 /* if all data blocks are transferred, remove and complete the request */
939 if (desc->completed_desc_num == desc->desc_num) {
940 list_del(&vd->node);
941 vchan_cookie_complete(vd);
942 goto out;
943 }
944
945 if (desc->completed_desc_num > desc->desc_num ||
946 complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
947 goto out;
948
949 /* transfer the rest of data */
950 xdma_xfer_start(xchan);
951 } else {
952 desc->completed_desc_num = complete_desc_num;
953 vchan_cyclic_callback(vd);
954 }
955
956out:
957 spin_unlock(&xchan->vchan.lock);
958 return IRQ_HANDLED;
959}
960
961/**
962 * xdma_irq_fini - Uninitialize IRQ
963 * @xdev: DMA device pointer
964 */
965static void xdma_irq_fini(struct xdma_device *xdev)
966{
967 int i;
968
969 /* disable interrupt */
970 regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);
971
972 /* free irq handler */
973 for (i = 0; i < xdev->h2c_chan_num; i++)
974 free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
975
976 for (i = 0; i < xdev->c2h_chan_num; i++)
977 free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
978}
979
980/**
981 * xdma_set_vector_reg - configure hardware IRQ registers
982 * @xdev: DMA device pointer
983 * @vec_tbl_start: Start of IRQ registers
984 * @irq_start: Start of IRQ
985 * @irq_num: Number of IRQ
986 */
987static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
988 u32 irq_start, u32 irq_num)
989{
990 u32 shift, i, val = 0;
991 int ret;
992
993 /* Each IRQ register is 32 bit and contains 4 IRQs */
994 while (irq_num > 0) {
995 for (i = 0; i < 4; i++) {
996 shift = XDMA_IRQ_VEC_SHIFT * i;
997 val |= irq_start << shift;
998 irq_start++;
999 irq_num--;
1000 if (!irq_num)
1001 break;
1002 }
1003
1004 /* write IRQ register */
1005 ret = regmap_write(xdev->rmap, vec_tbl_start, val);
1006 if (ret)
1007 return ret;
1008 vec_tbl_start += sizeof(u32);
1009 val = 0;
1010 }
1011
1012 return 0;
1013}
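
/*
 * Worked example, assuming XDMA_IRQ_VEC_SHIFT = 8 as in xdma-regs.h:
 * xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0, 6) writes two
 * registers, 0x03020100 (vectors 0-3, one per byte) and 0x00000504
 * (vectors 4 and 5), mapping each channel interrupt source to its MSI-X
 * vector.
 */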
1014
1015/**
1016 * xdma_irq_init - initialize IRQs
1017 * @xdev: DMA device pointer
1018 */
1019static int xdma_irq_init(struct xdma_device *xdev)
1020{
1021 u32 irq = xdev->irq_start;
1022 u32 user_irq_start;
1023 int i, j, ret;
1024
1025 /* return failure if there are not enough IRQs */
1026 if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
1027 xdma_err(xdev, "not enough irq");
1028 return -EINVAL;
1029 }
1030
1031 /* setup H2C interrupt handler */
1032 for (i = 0; i < xdev->h2c_chan_num; i++) {
1033 ret = request_irq(irq, xdma_channel_isr, 0,
1034 "xdma-h2c-channel", &xdev->h2c_chans[i]);
1035 if (ret) {
1036 xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
1037 i, irq, ret);
1038 goto failed_init_h2c;
1039 }
1040 xdev->h2c_chans[i].irq = irq;
1041 irq++;
1042 }
1043
1044 /* setup C2H interrupt handler */
1045 for (j = 0; j < xdev->c2h_chan_num; j++) {
1046 ret = request_irq(irq, xdma_channel_isr, 0,
1047 "xdma-c2h-channel", &xdev->c2h_chans[j]);
1048 if (ret) {
1049 xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
1050 j, irq, ret);
1051 goto failed_init_c2h;
1052 }
1053 xdev->c2h_chans[j].irq = irq;
1054 irq++;
1055 }
1056
1057 /* config hardware IRQ registers */
1058 ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
1059 XDMA_CHAN_NUM(xdev));
1060 if (ret) {
1061 xdma_err(xdev, "failed to set channel vectors: %d", ret);
1062 goto failed_init_c2h;
1063 }
1064
1065 /* config user IRQ registers if needed */
1066 user_irq_start = XDMA_CHAN_NUM(xdev);
1067 if (xdev->irq_num > user_irq_start) {
1068 ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
1069 user_irq_start,
1070 xdev->irq_num - user_irq_start);
1071 if (ret) {
1072 xdma_err(xdev, "failed to set user vectors: %d", ret);
1073 goto failed_init_c2h;
1074 }
1075 }
1076
1077 /* enable interrupt */
1078 ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
1079 if (ret)
1080 goto failed_init_c2h;
1081
1082 return 0;
1083
1084failed_init_c2h:
1085 while (j--)
1086 free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
1087failed_init_h2c:
1088 while (i--)
1089 free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
1090
1091 return ret;
1092}
1093
1094static bool xdma_filter_fn(struct dma_chan *chan, void *param)
1095{
1096 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
1097 struct xdma_chan_info *chan_info = param;
1098
1099 return chan_info->dir == xdma_chan->dir;
1100}
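
/*
 * Illustrative sketch of the platform data the PCIe parent driver is
 * expected to register the "xdma" platform device with; the client device
 * and slave names are hypothetical.
 *
 *	static struct xdma_chan_info h2c_info = { .dir = DMA_MEM_TO_DEV };
 *	static struct dma_slave_map xdma_map[] = {
 *		{ "my-client", "h2c", &h2c_info },
 *	};
 *	struct xdma_platdata pdata = {
 *		.max_dma_channels = 4,
 *		.device_map = xdma_map,
 *		.device_map_cnt = ARRAY_SIZE(xdma_map),
 *	};
 *
 * dma_request_chan() then resolves "h2c" through this map and
 * xdma_filter_fn() matches the requested direction against the channel's
 * direction.
 */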
1101
1102/**
1103 * xdma_disable_user_irq - Disable user interrupt
1104 * @pdev: Pointer to the platform_device structure
1105 * @irq_num: System IRQ number
1106 */
1107void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
1108{
1109 struct xdma_device *xdev = platform_get_drvdata(pdev);
1110 u32 index;
1111
1112 index = irq_num - xdev->irq_start;
1113 if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
1114 xdma_err(xdev, "invalid user irq number");
1115 return;
1116 }
1117 index -= XDMA_CHAN_NUM(xdev);
1118
1119 regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
1120}
1121EXPORT_SYMBOL(xdma_disable_user_irq);
1122
1123/**
1124 * xdma_enable_user_irq - Enable user logic interrupt
1125 * @pdev: Pointer to the platform_device structure
1126 * @irq_num: System IRQ number
1127 */
1128int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
1129{
1130 struct xdma_device *xdev = platform_get_drvdata(pdev);
1131 u32 index;
1132 int ret;
1133
1134 index = irq_num - xdev->irq_start;
1135 if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
1136 xdma_err(xdev, "invalid user irq number");
1137 return -EINVAL;
1138 }
1139 index -= XDMA_CHAN_NUM(xdev);
1140
1141 ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
1142 if (ret)
1143 return ret;
1144
1145 return 0;
1146}
1147EXPORT_SYMBOL(xdma_enable_user_irq);
1148
1149/**
1150 * xdma_get_user_irq - Get system IRQ number
1151 * @pdev: Pointer to the platform_device structure
1152 * @user_irq_index: User logic IRQ wire index
1153 *
1154 * Return: The system IRQ number allocated for the given wire index.
1155 */
1156int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
1157{
1158 struct xdma_device *xdev = platform_get_drvdata(pdev);
1159
1160 if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
1161 xdma_err(xdev, "invalid user irq index");
1162 return -EINVAL;
1163 }
1164
1165 return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
1166}
1167EXPORT_SYMBOL(xdma_get_user_irq);
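
/*
 * Illustrative sketch of the exported user-IRQ helpers (user_logic_isr, priv
 * and xdma_pdev are hypothetical); the value returned by xdma_get_user_irq()
 * is a system IRQ number and is also what the enable/disable helpers expect.
 *
 *	int irq = xdma_get_user_irq(xdma_pdev, 0);
 *
 *	if (irq < 0)
 *		return irq;
 *	ret = request_irq(irq, user_logic_isr, 0, "xdma-user", priv);
 *	if (ret)
 *		return ret;
 *	xdma_enable_user_irq(xdma_pdev, irq);
 */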
1168
1169/**
1170 * xdma_remove - Driver remove function
1171 * @pdev: Pointer to the platform_device structure
1172 */
1173static void xdma_remove(struct platform_device *pdev)
1174{
1175 struct xdma_device *xdev = platform_get_drvdata(pdev);
1176
1177 if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
1178 xdma_irq_fini(xdev);
1179
1180 if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
1181 dma_async_device_unregister(&xdev->dma_dev);
1182}
1183
1184/**
1185 * xdma_probe - Driver probe function
1186 * @pdev: Pointer to the platform_device structure
1187 */
1188static int xdma_probe(struct platform_device *pdev)
1189{
1190 struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
1191 struct xdma_device *xdev;
1192 void __iomem *reg_base;
1193 struct resource *res;
1194 int ret = -ENODEV;
1195
1196 if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
1197 dev_err(&pdev->dev, "invalid max dma channels %d",
1198 pdata->max_dma_channels);
1199 return -EINVAL;
1200 }
1201
1202 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1203 if (!xdev)
1204 return -ENOMEM;
1205
1206 platform_set_drvdata(pdev, xdev);
1207 xdev->pdev = pdev;
1208
1209 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1210 if (!res) {
1211 xdma_err(xdev, "failed to get irq resource");
1212 goto failed;
1213 }
1214 xdev->irq_start = res->start;
1215 xdev->irq_num = resource_size(res);
1216
1217 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1218 if (!res) {
1219 xdma_err(xdev, "failed to get io resource");
1220 goto failed;
1221 }
1222
1223 reg_base = devm_ioremap_resource(&pdev->dev, res);
1224 if (IS_ERR(reg_base)) {
1225 xdma_err(xdev, "ioremap failed");
1226 goto failed;
1227 }
1228
1229 xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
1230 &xdma_regmap_config);
1231 if (IS_ERR(xdev->rmap)) {
1232 xdma_err(xdev, "config regmap failed: %ld", PTR_ERR(xdev->rmap));
1233 goto failed;
1234 }
1235 INIT_LIST_HEAD(&xdev->dma_dev.channels);
1236
1237 ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
1238 if (ret) {
1239 xdma_err(xdev, "config H2C channels failed: %d", ret);
1240 goto failed;
1241 }
1242
1243 ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
1244 if (ret) {
1245 xdma_err(xdev, "config C2H channels failed: %d", ret);
1246 goto failed;
1247 }
1248
1249 dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
1250 dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
1251 dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
1252 dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
1253 dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
1254 dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
1255
1256 xdev->dma_dev.dev = &pdev->dev;
1257 xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
1258 xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
1259 xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
1260 xdev->dma_dev.device_tx_status = xdma_tx_status;
1261 xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
1262 xdev->dma_dev.device_config = xdma_device_config;
1263 xdev->dma_dev.device_issue_pending = xdma_issue_pending;
1264 xdev->dma_dev.device_terminate_all = xdma_terminate_all;
1265 xdev->dma_dev.device_synchronize = xdma_synchronize;
1266 xdev->dma_dev.filter.map = pdata->device_map;
1267 xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
1268 xdev->dma_dev.filter.fn = xdma_filter_fn;
1269 xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
1270 xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
1271
1272 ret = dma_async_device_register(&xdev->dma_dev);
1273 if (ret) {
1274 xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
1275 goto failed;
1276 }
1277 xdev->status |= XDMA_DEV_STATUS_REG_DMA;
1278
1279 ret = xdma_irq_init(xdev);
1280 if (ret) {
1281 xdma_err(xdev, "failed to init msix: %d", ret);
1282 goto failed;
1283 }
1284 xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;
1285
1286 return 0;
1287
1288failed:
1289 xdma_remove(pdev);
1290
1291 return ret;
1292}
1293
1294static const struct platform_device_id xdma_id_table[] = {
1295 { "xdma", 0},
1296 { },
1297};
1298
1299static struct platform_driver xdma_driver = {
1300 .driver = {
1301 .name = "xdma",
1302 },
1303 .id_table = xdma_id_table,
1304 .probe = xdma_probe,
1305 .remove_new = xdma_remove,
1306};
1307
1308module_platform_driver(xdma_driver);
1309
1310MODULE_DESCRIPTION("AMD XDMA driver");
1311MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
1312MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * DMA driver for Xilinx DMA/Bridge Subsystem
4 *
5 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
6 * Copyright (C) 2022, Advanced Micro Devices, Inc.
7 */
8
9/*
10 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
11 * between Host memory and the DMA subsystem. It does this by operating on
12 * 'descriptors' that contain information about the source, destination and
13 * amount of data to transfer. These direct memory transfers can be both in
14 * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be
15 * configured to have a single AXI4 Master interface shared by all channels
16 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
17 * specified on a per-channel basis in descriptor linked lists, which the DMA
18 * fetches from host memory and processes. Events such as descriptor completion
19 * and errors are signaled using interrupts. The core also provides up to 16
20 * user interrupt wires that generate interrupts to the host.
21 */
22
23#include <linux/mod_devicetable.h>
24#include <linux/bitfield.h>
25#include <linux/dmapool.h>
26#include <linux/regmap.h>
27#include <linux/dmaengine.h>
28#include <linux/dma/amd_xdma.h>
29#include <linux/platform_device.h>
30#include <linux/platform_data/amd_xdma.h>
31#include <linux/dma-mapping.h>
32#include <linux/pci.h>
33#include "../virt-dma.h"
34#include "xdma-regs.h"
35
36/* mmio regmap config for all XDMA registers */
37static const struct regmap_config xdma_regmap_config = {
38 .reg_bits = 32,
39 .val_bits = 32,
40 .reg_stride = 4,
41 .max_register = XDMA_REG_SPACE_LEN,
42};
43
44/**
45 * struct xdma_desc_block - Descriptor block
46 * @virt_addr: Virtual address of block start
47 * @dma_addr: DMA address of block start
48 */
49struct xdma_desc_block {
50 void *virt_addr;
51 dma_addr_t dma_addr;
52};
53
54/**
55 * struct xdma_chan - Driver specific DMA channel structure
56 * @vchan: Virtual channel
57 * @xdev_hdl: Pointer to DMA device structure
58 * @base: Offset of channel registers
59 * @desc_pool: Descriptor pool
60 * @busy: Busy flag of the channel
61 * @dir: Transferring direction of the channel
62 * @cfg: Transferring config of the channel
63 * @irq: IRQ assigned to the channel
64 */
65struct xdma_chan {
66 struct virt_dma_chan vchan;
67 void *xdev_hdl;
68 u32 base;
69 struct dma_pool *desc_pool;
70 bool busy;
71 enum dma_transfer_direction dir;
72 struct dma_slave_config cfg;
73 u32 irq;
74 struct completion last_interrupt;
75 bool stop_requested;
76};
77
78/**
79 * struct xdma_desc - DMA desc structure
80 * @vdesc: Virtual DMA descriptor
81 * @chan: DMA channel pointer
82 * @dir: Transferring direction of the request
83 * @desc_blocks: Hardware descriptor blocks
84 * @dblk_num: Number of hardware descriptor blocks
85 * @desc_num: Number of hardware descriptors
86 * @completed_desc_num: Completed hardware descriptors
87 * @cyclic: Cyclic transfer vs. scatter-gather
88 * @interleaved_dma: Interleaved DMA transfer
89 * @periods: Number of periods in the cyclic transfer
90 * @period_size: Size of a period in bytes in cyclic transfers
91 * @frames_left: Number of frames left in interleaved DMA transfer
92 * @error: tx error flag
93 */
94struct xdma_desc {
95 struct virt_dma_desc vdesc;
96 struct xdma_chan *chan;
97 enum dma_transfer_direction dir;
98 struct xdma_desc_block *desc_blocks;
99 u32 dblk_num;
100 u32 desc_num;
101 u32 completed_desc_num;
102 bool cyclic;
103 bool interleaved_dma;
104 u32 periods;
105 u32 period_size;
106 u32 frames_left;
107 bool error;
108};
109
110#define XDMA_DEV_STATUS_REG_DMA BIT(0)
111#define XDMA_DEV_STATUS_INIT_MSIX BIT(1)
112
113/**
114 * struct xdma_device - DMA device structure
115 * @pdev: Platform device pointer
116 * @dma_dev: DMA device structure
117 * @rmap: MMIO regmap for DMA registers
118 * @h2c_chans: Host to Card channels
119 * @c2h_chans: Card to Host channels
120 * @h2c_chan_num: Number of H2C channels
121 * @c2h_chan_num: Number of C2H channels
122 * @irq_start: Start IRQ assigned to device
123 * @irq_num: Number of IRQ assigned to device
124 * @status: Initialization status
125 */
126struct xdma_device {
127 struct platform_device *pdev;
128 struct dma_device dma_dev;
129 struct regmap *rmap;
130 struct xdma_chan *h2c_chans;
131 struct xdma_chan *c2h_chans;
132 u32 h2c_chan_num;
133 u32 c2h_chan_num;
134 u32 irq_start;
135 u32 irq_num;
136 u32 status;
137};
138
139#define xdma_err(xdev, fmt, args...) \
140 dev_err(&(xdev)->pdev->dev, fmt, ##args)
141#define XDMA_CHAN_NUM(_xd) ({ \
142 typeof(_xd) (xd) = (_xd); \
143 ((xd)->h2c_chan_num + (xd)->c2h_chan_num); })
144
145/* Get the last desc in a desc block */
146static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
147{
148 return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
149}
150
151/**
152 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
153 * @sw_desc: Tx descriptor pointer
154 */
155static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
156{
157 struct xdma_desc_block *block;
158 u32 last_blk_desc, desc_control;
159 struct xdma_hw_desc *desc;
160 int i;
161
162 desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
163 for (i = 1; i < sw_desc->dblk_num; i++) {
164 block = &sw_desc->desc_blocks[i - 1];
165 desc = xdma_blk_last_desc(block);
166
167 if (!(i & XDMA_DESC_BLOCK_MASK)) {
168 desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
169 continue;
170 }
171 desc->control = cpu_to_le32(desc_control);
172 desc->next_desc = cpu_to_le64(block[1].dma_addr);
173 }
174
175 /* update the last block */
176 last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
177 if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
178 block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
179 desc = xdma_blk_last_desc(block);
180 desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
181 desc->control = cpu_to_le32(desc_control);
182 }
183
184 block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
185 desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
186 desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
187}
188
189/**
190 * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
191 * @sw_desc: Tx descriptor pointer
192 */
193static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
194{
195 struct xdma_desc_block *block;
196 struct xdma_hw_desc *desc;
197 int i;
198
199 block = sw_desc->desc_blocks;
200 for (i = 0; i < sw_desc->desc_num - 1; i++) {
201 desc = block->virt_addr + i * XDMA_DESC_SIZE;
202 desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
203 }
204 desc = block->virt_addr + i * XDMA_DESC_SIZE;
205 desc->next_desc = cpu_to_le64(block->dma_addr);
206}
207
208static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
209{
210 return container_of(chan, struct xdma_chan, vchan.chan);
211}
212
213static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
214{
215 return container_of(vdesc, struct xdma_desc, vdesc);
216}
217
218/**
219 * xdma_channel_init - Initialize DMA channel registers
220 * @chan: DMA channel pointer
221 */
222static int xdma_channel_init(struct xdma_chan *chan)
223{
224 struct xdma_device *xdev = chan->xdev_hdl;
225 int ret;
226
227 ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
228 CHAN_CTRL_NON_INCR_ADDR);
229 if (ret)
230 return ret;
231
232 ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
233 CHAN_IM_ALL);
234 if (ret)
235 return ret;
236
237 return 0;
238}
239
240/**
241 * xdma_free_desc - Free descriptor
242 * @vdesc: Virtual DMA descriptor
243 */
244static void xdma_free_desc(struct virt_dma_desc *vdesc)
245{
246 struct xdma_desc *sw_desc;
247 int i;
248
249 sw_desc = to_xdma_desc(vdesc);
250 for (i = 0; i < sw_desc->dblk_num; i++) {
251 if (!sw_desc->desc_blocks[i].virt_addr)
252 break;
253 dma_pool_free(sw_desc->chan->desc_pool,
254 sw_desc->desc_blocks[i].virt_addr,
255 sw_desc->desc_blocks[i].dma_addr);
256 }
257 kfree(sw_desc->desc_blocks);
258 kfree(sw_desc);
259}
260
261/**
262 * xdma_alloc_desc - Allocate descriptor
263 * @chan: DMA channel pointer
264 * @desc_num: Number of hardware descriptors
265 * @cyclic: Whether this is a cyclic transfer
266 */
267static struct xdma_desc *
268xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
269{
270 struct xdma_desc *sw_desc;
271 struct xdma_hw_desc *desc;
272 dma_addr_t dma_addr;
273 u32 dblk_num;
274 u32 control;
275 void *addr;
276 int i, j;
277
278 sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
279 if (!sw_desc)
280 return NULL;
281
282 sw_desc->chan = chan;
283 sw_desc->desc_num = desc_num;
284 sw_desc->cyclic = cyclic;
285 sw_desc->error = false;
286 dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
287 sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
288 GFP_NOWAIT);
289 if (!sw_desc->desc_blocks)
290 goto failed;
291
292 if (cyclic)
293 control = XDMA_DESC_CONTROL_CYCLIC;
294 else
295 control = XDMA_DESC_CONTROL(1, 0);
296
297 sw_desc->dblk_num = dblk_num;
298 for (i = 0; i < sw_desc->dblk_num; i++) {
299 addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
300 if (!addr)
301 goto failed;
302
303 sw_desc->desc_blocks[i].virt_addr = addr;
304 sw_desc->desc_blocks[i].dma_addr = dma_addr;
305 for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
306 desc[j].control = cpu_to_le32(control);
307 }
308
309 if (cyclic)
310 xdma_link_cyclic_desc_blocks(sw_desc);
311 else
312 xdma_link_sg_desc_blocks(sw_desc);
313
314 return sw_desc;
315
316failed:
317 xdma_free_desc(&sw_desc->vdesc);
318 return NULL;
319}
320
321/**
322 * xdma_xfer_start - Start DMA transfer
323 * @xchan: DMA channel pointer
324 */
325static int xdma_xfer_start(struct xdma_chan *xchan)
326{
327 struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
328 struct xdma_device *xdev = xchan->xdev_hdl;
329 struct xdma_desc_block *block;
330 u32 val, completed_blocks;
331 struct xdma_desc *desc;
332 int ret;
333
334 /*
335 * check if there is not any submitted descriptor or channel is busy.
336 * vchan lock should be held where this function is called.
337 */
338 if (!vd || xchan->busy)
339 return -EINVAL;
340
341 /* clear run stop bit to get ready for transfer */
342 ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
343 CHAN_CTRL_RUN_STOP);
344 if (ret)
345 return ret;
346
347 desc = to_xdma_desc(vd);
348 if (desc->dir != xchan->dir) {
349 xdma_err(xdev, "incorrect request direction");
350 return -EINVAL;
351 }
352
353 /* set DMA engine to the first descriptor block */
354 completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
355 block = &desc->desc_blocks[completed_blocks];
356 val = lower_32_bits(block->dma_addr);
357 ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
358 if (ret)
359 return ret;
360
361 val = upper_32_bits(block->dma_addr);
362 ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
363 if (ret)
364 return ret;
365
366 if (completed_blocks + 1 == desc->dblk_num)
367 val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
368 else
369 val = XDMA_DESC_ADJACENT - 1;
370 ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
371 if (ret)
372 return ret;
373
374 /* kick off DMA transfer */
375 ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
376 CHAN_CTRL_START);
377 if (ret)
378 return ret;
379
380 xchan->busy = true;
381 xchan->stop_requested = false;
382 reinit_completion(&xchan->last_interrupt);
383
384 return 0;
385}
386
387/**
388 * xdma_xfer_stop - Stop DMA transfer
389 * @xchan: DMA channel pointer
390 */
391static int xdma_xfer_stop(struct xdma_chan *xchan)
392{
393 int ret;
394 struct xdma_device *xdev = xchan->xdev_hdl;
395
396 /* clear run stop bit to prevent any further auto-triggering */
397 ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
398 CHAN_CTRL_RUN_STOP);
399 if (ret)
400 return ret;
401 return ret;
402}
403
404/**
405 * xdma_alloc_channels - Detect and allocate DMA channels
406 * @xdev: DMA device pointer
407 * @dir: Channel direction
408 */
409static int xdma_alloc_channels(struct xdma_device *xdev,
410 enum dma_transfer_direction dir)
411{
412 struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
413 struct xdma_chan **chans, *xchan;
414 u32 base, identifier, target;
415 u32 *chan_num;
416 int i, j, ret;
417
418 if (dir == DMA_MEM_TO_DEV) {
419 base = XDMA_CHAN_H2C_OFFSET;
420 target = XDMA_CHAN_H2C_TARGET;
421 chans = &xdev->h2c_chans;
422 chan_num = &xdev->h2c_chan_num;
423 } else if (dir == DMA_DEV_TO_MEM) {
424 base = XDMA_CHAN_C2H_OFFSET;
425 target = XDMA_CHAN_C2H_TARGET;
426 chans = &xdev->c2h_chans;
427 chan_num = &xdev->c2h_chan_num;
428 } else {
429 xdma_err(xdev, "invalid direction specified");
430 return -EINVAL;
431 }
432
433 /* detect number of available DMA channels */
434 for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
435 ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
436 &identifier);
437 if (ret)
438 return ret;
439
440 /* check if it is available DMA channel */
441 if (XDMA_CHAN_CHECK_TARGET(identifier, target))
442 (*chan_num)++;
443 }
444
445 if (!*chan_num) {
446 xdma_err(xdev, "does not probe any channel");
447 return -EINVAL;
448 }
449
450 *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
451 GFP_KERNEL);
452 if (!*chans)
453 return -ENOMEM;
454
455 for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
456 ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
457 &identifier);
458 if (ret)
459 return ret;
460
461 if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
462 continue;
463
464 if (j == *chan_num) {
465 xdma_err(xdev, "invalid channel number");
466 return -EIO;
467 }
468
469 /* init channel structure and hardware */
470 xchan = &(*chans)[j];
471 xchan->xdev_hdl = xdev;
472 xchan->base = base + i * XDMA_CHAN_STRIDE;
473 xchan->dir = dir;
474 xchan->stop_requested = false;
475 init_completion(&xchan->last_interrupt);
476
477 ret = xdma_channel_init(xchan);
478 if (ret)
479 return ret;
480 xchan->vchan.desc_free = xdma_free_desc;
481 vchan_init(&xchan->vchan, &xdev->dma_dev);
482
483 j++;
484 }
485
486 dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
487 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");
488
489 return 0;
490}
491
492/**
493 * xdma_issue_pending - Issue pending transactions
494 * @chan: DMA channel pointer
495 */
496static void xdma_issue_pending(struct dma_chan *chan)
497{
498 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
499 unsigned long flags;
500
501 spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
502 if (vchan_issue_pending(&xdma_chan->vchan))
503 xdma_xfer_start(xdma_chan);
504 spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
505}
506
507/**
508 * xdma_terminate_all - Terminate all transactions
509 * @chan: DMA channel pointer
510 */
511static int xdma_terminate_all(struct dma_chan *chan)
512{
513 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
514 struct virt_dma_desc *vd;
515 unsigned long flags;
516 LIST_HEAD(head);
517
518 xdma_xfer_stop(xdma_chan);
519
520 spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
521
522 xdma_chan->busy = false;
523 xdma_chan->stop_requested = true;
524 vd = vchan_next_desc(&xdma_chan->vchan);
525 if (vd) {
526 list_del(&vd->node);
527 dma_cookie_complete(&vd->tx);
528 vchan_terminate_vdesc(vd);
529 }
530 vchan_get_all_descriptors(&xdma_chan->vchan, &head);
531 list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);
532
533 spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
534
535 return 0;
536}
537
538/**
539 * xdma_synchronize - Synchronize terminated transactions
540 * @chan: DMA channel pointer
541 */
542static void xdma_synchronize(struct dma_chan *chan)
543{
544 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
545 struct xdma_device *xdev = xdma_chan->xdev_hdl;
546 int st = 0;
547
548 /* If the engine continues running, wait for the last interrupt */
549 regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
550 if (st & XDMA_CHAN_STATUS_BUSY)
551 wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
552
553 vchan_synchronize(&xdma_chan->vchan);
554}
555
556/**
557 * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
558 * More than one descriptor will be used if the size is bigger
559 * than XDMA_DESC_BLEN_MAX.
560 * @sw_desc: Descriptor container
561 * @src_addr: First value for the ->src_addr field
562 * @dst_addr: First value for the ->dst_addr field
563 * @size: Size of the contiguous memory block
564 * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
565 */
566static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
567 u64 dst_addr, u32 size, u32 filled_descs_num)
568{
569 u32 left = size, len, desc_num = filled_descs_num;
570 struct xdma_desc_block *dblk;
571 struct xdma_hw_desc *desc;
572
573 dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
574 desc = dblk->virt_addr;
575 desc += desc_num & XDMA_DESC_ADJACENT_MASK;
576 do {
577 len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
578 /* set hardware descriptor */
579 desc->bytes = cpu_to_le32(len);
580 desc->src_addr = cpu_to_le64(src_addr);
581 desc->dst_addr = cpu_to_le64(dst_addr);
582 if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
583 desc = (++dblk)->virt_addr;
584 else
585 desc++;
586
587 src_addr += len;
588 dst_addr += len;
589 left -= len;
590 } while (left);
591
592 return desc_num - filled_descs_num;
593}
594
595/**
596 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
597 * @chan: DMA channel pointer
598 * @sgl: Transfer scatter gather list
599 * @sg_len: Length of scatter gather list
600 * @dir: Transfer direction
601 * @flags: transfer ack flags
602 * @context: APP words of the descriptor
603 */
604static struct dma_async_tx_descriptor *
605xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
606 unsigned int sg_len, enum dma_transfer_direction dir,
607 unsigned long flags, void *context)
608{
609 struct xdma_chan *xdma_chan = to_xdma_chan(chan);
610 struct dma_async_tx_descriptor *tx_desc;
611 struct xdma_desc *sw_desc;
612 u32 desc_num = 0, i;
613 u64 addr, dev_addr, *src, *dst;
614 struct scatterlist *sg;
615
616 for_each_sg(sgl, sg, sg_len, i)
617 desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
618
619 sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
620 if (!sw_desc)
621 return NULL;
622 sw_desc->dir = dir;
623 sw_desc->cyclic = false;
624 sw_desc->interleaved_dma = false;
625
	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	desc_num = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
		dev_addr += sg_dma_len(sg);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}

/**
 * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
 * @chan: DMA channel pointer
 * @address: Device DMA address to access
 * @size: Total length to transfer
 * @period_size: Period size to use for each transfer
 * @dir: Transfer direction
 * @flags: Transfer ack flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
		     size_t size, size_t period_size,
		     enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	unsigned int periods = size / period_size;
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_desc *sw_desc;
	u64 addr, dev_addr, *src, *dst;
	u32 desc_num;
	unsigned int i;

	/*
	 * Simplify the whole logic by preventing an abnormally high number
	 * of periods and an abnormally large period size.
	 */
	if (period_size > XDMA_DESC_BLEN_MAX) {
		xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
		return NULL;
	}

	if (periods > XDMA_DESC_ADJACENT) {
		xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
		return NULL;
	}

	sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
	if (!sw_desc)
		return NULL;

	sw_desc->periods = periods;
	sw_desc->period_size = period_size;
	sw_desc->dir = dir;
	sw_desc->interleaved_dma = false;

	addr = address;
	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

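	/* Each period fits in a single hardware descriptor (checked above). */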
	desc_num = 0;
	for (i = 0; i < periods; i++) {
		desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
		addr += period_size;
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}

/**
 * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
 * @chan: DMA channel
 * @xt: DMA transfer template
 * @flags: tx flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_interleaved_dma(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	int i;
	u32 desc_num = 0, period_size = 0;
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_chan *xchan = to_xdma_chan(chan);
	struct xdma_desc *sw_desc;
	u64 src_addr, dst_addr;

	for (i = 0; i < xt->frame_size; ++i)
		desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xchan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = xt->dir;
	sw_desc->interleaved_dma = true;
	sw_desc->cyclic = flags & DMA_PREP_REPEAT;
	sw_desc->frames_left = xt->numf;
	sw_desc->periods = xt->numf;

	desc_num = 0;
	src_addr = xt->src_start;
	dst_addr = xt->dst_start;
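	/*
	 * Walk the frame template: after each chunk, advance the source and
	 * destination by the inter-chunk gap, plus the chunk size when the
	 * corresponding address increments.
	 */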
	for (i = 0; i < xt->frame_size; ++i) {
		desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
		src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
							      xt->sgl[i].size : 0);
		dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
							      xt->sgl[i].size : 0);
		period_size += xt->sgl[i].size;
	}
	sw_desc->period_size = period_size;

	tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
	if (tx_desc)
		return tx_desc;

	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_device_config - Configure the DMA channel
 * @chan: DMA channel
 * @cfg: channel configuration
 */
static int xdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	vchan_free_chan_resources(&xdma_chan->vchan);
	dma_pool_destroy(xdma_chan->desc_pool);
	xdma_chan->desc_pool = NULL;
}

/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	struct device *dev = xdev->dma_dev.dev;

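	/*
	 * Descriptor blocks must be mapped against the device that actually
	 * masters the bus, so walk up from the platform device to the
	 * underlying PCI device before creating the pool.
	 */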
	while (dev && !dev_is_pci(dev))
		dev = dev->parent;
	if (!dev) {
		xdma_err(xdev, "unable to find pci device");
		return -EINVAL;
	}

	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
					       XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
	if (!xdma_chan->desc_pool) {
		xdma_err(xdev, "unable to allocate descriptor pool");
		return -ENOMEM;
	}

	return 0;
}

static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_desc *desc = NULL;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int period_idx;
	u32 residue = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
	if (!vd)
		goto out;

	desc = to_xdma_desc(vd);
	if (desc->error) {
		ret = DMA_ERROR;
	} else if (desc->cyclic) {
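		/*
		 * For cyclic transfers the residue is the number of bytes
		 * left until the end of the current pass over the ring.
		 */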
		period_idx = desc->completed_desc_num % desc->periods;
		residue = (desc->periods - period_idx) * desc->period_size;
		dma_set_residue(state, residue);
	}
out:
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	return ret;
}

/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct virt_dma_desc *vd, *next_vd;
	struct xdma_desc *desc;
	int ret;
	u32 st;
	bool repeat_tx;

	spin_lock(&xchan->vchan.lock);

	if (xchan->stop_requested)
		complete(&xchan->last_interrupt);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	/* Clear-on-read the status register */
	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
	if (ret)
		goto out;

	desc = to_xdma_desc(vd);

	st &= XDMA_CHAN_STATUS_MASK;
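	/*
	 * Any error bit, or an interrupt without a completed/stopped event,
	 * marks the whole descriptor as failed.
	 */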
	if ((st & XDMA_CHAN_ERROR_MASK) ||
	    !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
		desc->error = true;
		xdma_err(xdev, "channel error, status register value: 0x%x", st);
		goto out;
	}

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

	if (desc->interleaved_dma) {
		xchan->busy = false;
		desc->completed_desc_num += complete_desc_num;
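		/*
		 * A full set of descriptor blocks completed: the transfer is
		 * not done yet, so restart the engine on the next blocks.
		 */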
		if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
			xdma_xfer_start(xchan);
			goto out;
		}

		/* last desc of any frame */
		desc->frames_left--;
		if (desc->frames_left)
			goto out;

		/* last desc of the last frame */
		repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
		next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
		if (next_vd)
			repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
		if (repeat_tx) {
			desc->frames_left = desc->periods;
			desc->completed_desc_num = 0;
			vchan_cyclic_callback(vd);
		} else {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		}
		/* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
		xdma_xfer_start(xchan);
	} else if (!desc->cyclic) {
		xchan->busy = false;
		desc->completed_desc_num += complete_desc_num;

		/* if all data blocks are transferred, remove and complete the request */
		if (desc->completed_desc_num == desc->desc_num) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			goto out;
		}

		if (desc->completed_desc_num > desc->desc_num ||
		    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
			goto out;

		/* transfer the rest of data */
		xdma_xfer_start(xchan);
	} else {
		desc->completed_desc_num = complete_desc_num;
		vchan_cyclic_callback(vd);
	}

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}

/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
	int i;

	/* disable interrupt */
	regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

	/* free irq handler */
	for (i = 0; i < xdev->h2c_chan_num; i++)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	for (i = 0; i < xdev->c2h_chan_num; i++)
		free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}

/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQ
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
			if (!irq_num)
				break;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}

/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	u32 user_irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* config user IRQ registers if needed */
	user_irq_start = XDMA_CHAN_NUM(xdev);
	if (xdev->irq_num > user_irq_start) {
		ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
					  user_irq_start,
					  xdev->irq_num - user_irq_start);
		if (ret) {
			xdma_err(xdev, "failed to set user vectors: %d", ret);
			goto failed_init_c2h;
		}
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}

static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_chan_info *chan_info = param;

	return chan_info->dir == xdma_chan->dir;
}
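
/*
 * Channel lookup goes through the dmaengine filter map supplied in the
 * platform data: each map entry carries a struct xdma_chan_info whose
 * direction must match the channel handed out. Illustrative sketch only;
 * the consumer device and slave channel name below are hypothetical.
 *
 *	struct dma_chan *ch = dma_request_chan(&client_dev, "h2c-channel");
 */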

/**
 * xdma_disable_user_irq - Disable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;

	index = irq_num - xdev->irq_start;
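	/* User IRQ vectors are numbered after the per-channel vectors. */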
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return;
	}
	index -= XDMA_CHAN_NUM(xdev);

	regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);

/**
 * xdma_enable_user_irq - Enable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;
	int ret;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return -EINVAL;
	}
	index -= XDMA_CHAN_NUM(xdev);

	ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(xdma_enable_user_irq);

/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * Return: The system IRQ number allocated for the given wire index.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq index");
		return -EINVAL;
	}

	return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);
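
/*
 * Illustrative sketch (not part of the driver) of how a user-logic client
 * sitting on top of this platform device is expected to use the exported
 * helpers above; the handler, wire index and cookie below are hypothetical.
 *
 *	int irq = xdma_get_user_irq(pdev, 0);
 *
 *	if (irq < 0)
 *		return irq;
 *	ret = request_irq(irq, my_user_isr, 0, "my-user-irq", priv);
 *	if (!ret)
 *		ret = xdma_enable_user_irq(pdev, irq);
 */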

/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void xdma_remove(struct platform_device *pdev)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
		xdma_irq_fini(xdev);

	if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
		dma_async_device_unregister(&xdev->dma_dev);
}

/**
 * xdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct xdma_device *xdev;
	void __iomem *reg_base;
	struct resource *res;
	int ret = -ENODEV;

	if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
		dev_err(&pdev->dev, "invalid max dma channels %d",
			pdata->max_dma_channels);
		return -EINVAL;
	}

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, xdev);
	xdev->pdev = pdev;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		xdma_err(xdev, "failed to get irq resource");
		goto failed;
	}
	xdev->irq_start = res->start;
	xdev->irq_num = resource_size(res);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		xdma_err(xdev, "failed to get io resource");
		goto failed;
	}

	reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg_base)) {
		ret = PTR_ERR(reg_base);
		xdma_err(xdev, "ioremap failed");
		goto failed;
	}

	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
					   &xdma_regmap_config);
	if (IS_ERR(xdev->rmap)) {
		ret = PTR_ERR(xdev->rmap);
		xdma_err(xdev, "config regmap failed: %d", ret);
		goto failed;
	}
	INIT_LIST_HEAD(&xdev->dma_dev.channels);

	ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
	if (ret) {
		xdma_err(xdev, "config H2C channels failed: %d", ret);
		goto failed;
	}

	ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
	if (ret) {
		xdma_err(xdev, "config C2H channels failed: %d", ret);
		goto failed;
	}

	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);

	xdev->dma_dev.dev = &pdev->dev;
	xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
	xdev->dma_dev.device_tx_status = xdma_tx_status;
	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
	xdev->dma_dev.device_config = xdma_device_config;
	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
	xdev->dma_dev.device_terminate_all = xdma_terminate_all;
	xdev->dma_dev.device_synchronize = xdma_synchronize;
	xdev->dma_dev.filter.map = pdata->device_map;
	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
	xdev->dma_dev.filter.fn = xdma_filter_fn;
	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
	xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;

	ret = dma_async_device_register(&xdev->dma_dev);
	if (ret) {
		xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_REG_DMA;

	ret = xdma_irq_init(xdev);
	if (ret) {
		xdma_err(xdev, "failed to init msix: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;

	return 0;

failed:
	xdma_remove(pdev);

	return ret;
}

static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, xdma_id_table);

static struct platform_driver xdma_driver = {
	.driver = {
		.name = "xdma",
	},
	.id_table = xdma_id_table,
	.probe = xdma_probe,
	.remove = xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");