// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 */

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/bug.h>
#include <linux/reset.h>

#include "dmaengine.h"

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
#define PL330_MAX_BURST		16

#define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)
#define PL330_QUIRK_PERIPH_BURST	BIT(1)

enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DSTCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
			     | PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6
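
/*
 * Example (illustrative only, hand-assembled from the opcodes above):
 * a minimal program that copies one burst and signals event 0 would be
 * encoded as the byte stream
 *
 *	0xbc 0x00 <sar0..sar3>	DMAMOV SAR, src
 *	0xbc 0x02 <dar0..dar3>	DMAMOV DAR, dst
 *	0x04			DMALD
 *	0x08			DMAST
 *	0x34 0x00		DMASEV 0  (event number in bits [7:3])
 *	0x00			DMAEND
 *
 * The _emit_*() helpers below generate exactly this kind of byte stream
 * into the per-channel microcode buffer.
 */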

#define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
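
/*
 * Worked example (illustrative): for a CCR programmed with burst size
 * 4 bytes (size field = 2, i.e. 1 << 2) and burst length 8 (len field
 * = 7, plus 1), one burst moves 4 * 8 = 32 bytes, so
 * BYTE_TO_BURST(4096, ccr) = 128 bursts and BURST_TO_BYTE(128, ccr)
 * = 4096 bytes again.
 */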

/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1 byte/burst for P<->M and M<->M respectively.
 * For the typical scenario, at 1 word/burst, 10MB and 20MB xfers per
 * req should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
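
/*
 * Rough budget (illustrative): each request gets half of a channel's
 * microcode buffer (mcbufsz defaults to 2 * MCODE_BUFF_PER_REQ), i.e.
 * 256 bytes. A DMAMOV is 6 bytes, DMALP/DMALPEND are 2 bytes each and
 * DMALD/DMAST are 1 byte each, so even nested loop bodies fit
 * comfortably; _setup_req() below reports the exact size via a dry run.
 */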

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

/* The number of default descriptors */

#define NR_DEFAULT_DESC	16

/* Delay for runtime PM autosuspend, ms */
#define PL330_AUTOSUSPEND_DELAY 20

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:11;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful */
	PL330_ERR_NONE,
	/* Req aborted due to a global error */
	PL330_ERR_ABORT,
	/* Req failed due to a problem with the channel */
	PL330_ERR_FAIL,
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct dma_pl330_desc;

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct dma_pl330_desc *desc;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	phys_addr_t fifo_addr;
	/* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
	dma_addr_t fifo_dma;
	enum dma_data_direction dir;
	struct dma_slave_config slave_config;

	/* for cyclic capability */
	bool cyclic;

	/* for runtime pm tracking */
	bool active;
};

struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;

	spinlock_t		lock;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
	int quirks;

	struct reset_control	*rstc;
	struct reset_control	*rstc_ocp;
};

static struct pl330_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk = "arm,pl330-broken-no-flushp",
		.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
	},
	{
		.quirk = "arm,pl330-periph-burst",
		.id = PL330_QUIRK_PERIPH_BURST,
	}
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	int bytes_requested;
	bool last;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

struct _xfer_spec {
	u32 ccr;
	struct dma_pl330_desc *desc;
};

static int pl330_config_write(struct dma_chan *chan,
			struct dma_slave_config *slave_config,
			enum dma_transfer_direction direction);

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	buf[2] = val;
	buf[3] = val >> 8;
	buf[4] = val >> 16;
	buf[5] = val >> 24;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);
	buf[1] = chan & 0x7;
	buf[2] = addr;
	buf[3] = addr >> 8;
	buf[4] = addr >> 16;
	buf[5] = addr >> 24;

	return SZ_DMAGO;
}

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
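
/*
 * Illustrative derivation: loops_per_jiffy * HZ is roughly the number
 * of busy-wait loops per second, so dividing by 1000 and scaling by 't'
 * gives a budget of about 't' milliseconds. This is only a coarse upper
 * bound, not a precise timer.
 */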

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
		return;
	}

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = le32_to_cpu(*((__le32 *)&insn[2]));
	writel(val, regs + DBGINST1);

	/* Get going */
	writel(0, regs + DBGCMD);
}
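
/*
 * Example (illustrative): issuing DMAKILL to channel thread 3 packs
 * DBGINST0 as (0x01 << 16) | (3 << 8) | 1 -- first opcode byte in
 * [23:16], second opcode byte in [31:24], channel number in [10:8] and
 * bit 0 set to select a channel thread rather than the manager.
 */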

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	u32 inten = readl(regs + INTEN);

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));

	/* clear the event */
	if (inten & (1 << thrd->ev))
		writel(1 << thrd->ev, regs + INTCLR);
	/* Stop generating interrupts for SEV */
	writel(inten & ~(1 << thrd->ev), regs + INTEN);
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct dma_pl330_desc *desc;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (thrd->req[idx].desc != NULL) {
		req = &thrd->req[idx];
	} else {
		idx = thrd->lstenq;
		if (thrd->req[idx].desc != NULL)
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req)
		return true;

	/* Return if req is running */
	if (idx == thrd->req_running)
		return true;

	desc = req->desc;

	ns = desc->rqcfg.nonsecure ? 1 : 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)
		fallthrough;

	case PL330_STATE_FAULTING:
		_stop(thrd);
		fallthrough;

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)
		fallthrough;

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static u32 _emit_load(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_MEM_TO_DEV:
		off += _emit_LD(dry_run, &buf[off], cond);
		break;

	case DMA_DEV_TO_MEM:
		if (cond == ALWAYS) {
			off += _emit_LDP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_LDP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_LDP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_DEV_TO_MEM:
		off += _emit_ST(dry_run, &buf[off], cond);
		break;

	case DMA_MEM_TO_DEV:
		if (cond == ALWAYS) {
			off += _emit_STP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_STP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_STP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline int _ldst_peripheral(struct pl330_dmac *pl330,
				 unsigned dry_run, u8 buf[],
				 const struct _xfer_spec *pxs, int cyc,
				 enum pl330_cond cond)
{
	int off = 0;

	/*
	 * Do FLUSHP at the beginning to clear any stale DMA requests
	 * before the first WFP.
	 */
	if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
		off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
	}

	return off;
}

static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE;

	if (pl330->quirks & PL330_QUIRK_PERIPH_BURST)
		cond = BURST;

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
			cond);
		break;

	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

/*
 * Only transfers that are not an exact multiple of the burst size leave
 * dregs (a residue smaller than one full burst). Transfer those dregs
 * with a reduced-size burst, for mem-to-mem, mem-to-dev or dev-to-mem.
 */
static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int transfer_length)
{
	int off = 0;
	int dregs_ccr;

	if (transfer_length == 0)
		return off;

	/*
	 * dregs_len = (total bytes - BURST_TO_BYTE(bursts, ccr)) /
	 *             BRST_SIZE(ccr)
	 * The dregs len must be smaller than the burst len, so, for
	 * higher efficiency, we can modify CCR to use a reduced-size
	 * burst len for the dregs.
	 */
	dregs_ccr = pxs->ccr;
	dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
		(0xf << CC_DSTBRSTLEN_SHFT));
	dregs_ccr |= (((transfer_length - 1) & 0xf) <<
		CC_SRCBRSTLEN_SHFT);
	dregs_ccr |= (((transfer_length - 1) & 0xf) <<
		CC_DSTBRSTLEN_SHFT);

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1,
					BURST);
		break;

	case DMA_MEM_TO_MEM:
		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
		off += _ldst_memtomem(dry_run, &buf[off], pxs, 1);
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

/* Returns bytes consumed and updates bursts */
static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	if (*bursts == 1)
		return _bursts(pl330, dry_run, buf, pxs, 1);

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(pl330, 1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;
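
	/*
	 * Example (illustrative): for a mem-to-mem request on a >= r1p0
	 * DMAC, one unrolled burst is DMALD + DMAST = 2 bytes, a nested
	 * loop needs two DMALPs (4 bytes) and two DMALPENDs (4 bytes),
	 * so cycmax = (255 - 8) / 2 = 123 bursts per loop body.
	 */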

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}

static inline int _setup_loops(struct pl330_dmac *pl330,
			       unsigned dry_run, u8 buf[],
			       const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) /
		BRST_SIZE(ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(pl330, dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}
	off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs);

	return off;
}

static inline int _setup_xfer(struct pl330_dmac *pl330,
			      unsigned dry_run, u8 buf[],
			      const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(pl330, dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
		      struct pl330_thread *thrd, unsigned index,
		      struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	off += _setup_xfer(pl330, dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
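
/*
 * Illustrative shape of a generated program (not a literal dump):
 *
 *	DMAMOV CCR, ccr
 *	DMAMOV SAR, src
 *	DMAMOV DAR, dst
 *	DMALP <outer>, DMALP <inner>
 *		<FLUSHP/WFP + LD/ST or LDP/STP bursts>
 *	DMALPEND, DMALPEND
 *	<optional reduced-burst dregs>
 *	DMASEV <event>
 *	DMAEND
 */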

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
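
/*
 * Worked example (illustrative): incrementing src/dst, 4-byte burst
 * size (brst_size = 2) and burst length 8 (brst_len = 8) yields
 * ccr = CC_SRCINC | CC_DSTINC | (7 << 4) | (7 << 18) | (2 << 1) |
 * (2 << 15), with the cache-control and swap fields left at zero.
 */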

/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd,
	struct dma_pl330_desc *desc)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	switch (desc->rqtype) {
	case DMA_MEM_TO_DEV:
		break;

	case DMA_DEV_TO_MEM:
		break;

	case DMA_MEM_TO_MEM:
		break;

	default:
		return -ENOTSUPP;
	}

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (desc->rqtype != DMA_MEM_TO_MEM &&
	    desc->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, desc->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		desc->rqcfg.nonsecure = 0;
	else
		desc->rqcfg.nonsecure = 1;

	ccr = _prepare_ccr(&desc->rqcfg);

	idx = thrd->req[0].desc == NULL ? 0 : 1;

	xs.ccr = ccr;
	xs.desc = desc;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(pl330, 1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
			__func__, __LINE__, ret, pl330->mcbufsz / 2);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].desc = desc;
	_setup_req(pl330, 0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
{
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (!desc)
		return;

	pch = desc->pchan;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].desc = NULL;
			thrd->req[1].desc = NULL;
			thrd->req_running = -1;

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *descdone;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	regs = pl330->base;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pl330->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pl330->ddma.dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened, i.e., which thread notified */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
			__LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			descdone = thrd->req[active].desc;
			thrd->req[active].desc = NULL;

			thrd->req_running = -1;

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&descdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		descdone = list_first_entry(&pl330->req_done,
					    struct dma_pl330_desc, rqd);
		list_del(&descdone->rqd);
		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	int ev;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;
	int chans, i;

	if (pl330->state == DYING)
		return NULL;

	chans = pl330->pcfg.num_chan;

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].desc = NULL;
				thrd->req[1].desc = NULL;
				thrd->req_running = -1;
				break;
			}
		}
		thrd = NULL;
	}

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);

	_free_event(thrd, thrd->ev);
	thrd->free = true;
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
}

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].desc = NULL;

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].desc = NULL;

	thrd->req_running = -1;
}

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kcalloc(1 + chans, sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}

static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL,
				DMA_ATTR_PRIVILEGED);
	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_attrs(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus,
				DMA_ATTR_PRIVILEGED);
		return ret;
	}

	return 0;
}

static int pl330_add(struct pl330_dmac *pl330)
{
	int i, ret;

	/* Check if we can handle this DMAC */
	if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
			pl330->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pl330);

	if (pl330->pcfg.num_events == 0) {
		dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pl330->mcbufsz)
		pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pl330->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}

static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < pl330->pcfg.num_chan; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel(thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}

static void pl330_del(struct pl330_dmac *pl330)
{
	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_threads(pl330);

	dma_free_attrs(pl330->ddma.dev,
		pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
		pl330->mcode_bus, DMA_ATTR_PRIVILEGED);
}

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->thread, desc);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__,
					desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	bool power_down = false;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	if (list_empty(&pch->work_list)) {
		spin_lock(&pch->thread->dmac->lock);
		_stop(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
		power_down = true;
		pch->active = false;
	} else {
		/* Make sure the PL330 Channel thread is active */
		spin_lock(&pch->thread->dmac->lock);
		_start(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
	}

	while (!list_empty(&pch->completed_list)) {
		struct dmaengine_desc_callback cb;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		dmaengine_desc_get_callback(&desc->txd, &cb);

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
			if (power_down) {
				pch->active = true;
				spin_lock(&pch->thread->dmac->lock);
				_start(pch->thread);
				spin_unlock(&pch->thread->dmac->lock);
				power_down = false;
			}
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&pch->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);

	/* If work list empty, power down */
	if (power_down) {
		pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
		pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	}
}

static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct pl330_dmac *pl330 = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pl330)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pl330->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
}
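
/*
 * Example device-tree usage (illustrative; node names and peripheral
 * numbers are made up):
 *
 *	pdma0: dma-controller@12680000 {
 *		compatible = "arm,pl330", "arm,primecell";
 *		#dma-cells = <1>;
 *	};
 *
 *	serial@12c00000 {
 *		dmas = <&pdma0 2>, <&pdma0 3>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * The single cell is the peripheral (channel) id checked above.
 */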

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pl330->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->thread = pl330_request_channel(pl330);
	if (!pch->thread) {
		spin_unlock_irqrestore(&pl330->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pl330->lock, flags);

	return 1;
}

/*
 * We need the data direction between the DMAC (the dma-mapping "device") and
 * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing!
 */
static enum dma_data_direction
pl330_dma_slave_map_dir(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:
		return DMA_FROM_DEVICE;
	case DMA_DEV_TO_MEM:
		return DMA_TO_DEVICE;
	case DMA_DEV_TO_DEV:
		return DMA_BIDIRECTIONAL;
	default:
		return DMA_NONE;
	}
}

static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch)
{
	if (pch->dir != DMA_NONE)
		dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma,
				   1 << pch->burst_sz, pch->dir, 0);
	pch->dir = DMA_NONE;
}

static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
				  enum dma_transfer_direction dir)
{
	struct device *dev = pch->chan.device->dev;
	enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir);

	/* Already mapped for this config? */
	if (pch->dir == dma_dir)
		return true;

	pl330_unprep_slave_fifo(pch);
	pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr,
					 1 << pch->burst_sz, dma_dir, 0);
	if (dma_mapping_error(dev, pch->fifo_dma))
		return false;

	pch->dir = dma_dir;
	return true;
}

static int fixup_burst_len(int max_burst_len, int quirks)
{
	if (max_burst_len > PL330_MAX_BURST)
		return PL330_MAX_BURST;
	else if (max_burst_len < 1)
		return 1;
	else
		return max_burst_len;
}
2242
2243static int pl330_config_write(struct dma_chan *chan,
2244 struct dma_slave_config *slave_config,
2245 enum dma_transfer_direction direction)
2246{
2247 struct dma_pl330_chan *pch = to_pchan(chan);
2248
2249 pl330_unprep_slave_fifo(pch);
2250 if (direction == DMA_MEM_TO_DEV) {
2251 if (slave_config->dst_addr)
2252 pch->fifo_addr = slave_config->dst_addr;
2253 if (slave_config->dst_addr_width)
2254 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2255 pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
2256 pch->dmac->quirks);
2257 } else if (direction == DMA_DEV_TO_MEM) {
2258 if (slave_config->src_addr)
2259 pch->fifo_addr = slave_config->src_addr;
2260 if (slave_config->src_addr_width)
2261 pch->burst_sz = __ffs(slave_config->src_addr_width);
2262 pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
2263 pch->dmac->quirks);
2264 }
2265
2266 return 0;
2267}
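
/*
 * Example client configuration (hypothetical values): a 4-byte wide
 * peripheral FIFO written with bursts of 8 words,
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * yields burst_sz = __ffs(4) = 2 (4-byte beats) and burst_len = 8 above.
 */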
2268
2269static int pl330_config(struct dma_chan *chan,
2270 struct dma_slave_config *slave_config)
2271{
2272 struct dma_pl330_chan *pch = to_pchan(chan);
2273
2274 memcpy(&pch->slave_config, slave_config, sizeof(*slave_config));
2275
2276 return 0;
2277}
2278
2279static int pl330_terminate_all(struct dma_chan *chan)
2280{
2281 struct dma_pl330_chan *pch = to_pchan(chan);
2282 struct dma_pl330_desc *desc;
2283 unsigned long flags;
2284 struct pl330_dmac *pl330 = pch->dmac;
2285 bool power_down = false;
2286
2287 pm_runtime_get_sync(pl330->ddma.dev);
2288 spin_lock_irqsave(&pch->lock, flags);
2289
2290 spin_lock(&pl330->lock);
2291 _stop(pch->thread);
2292 pch->thread->req[0].desc = NULL;
2293 pch->thread->req[1].desc = NULL;
2294 pch->thread->req_running = -1;
2295 spin_unlock(&pl330->lock);
2296
2297 power_down = pch->active;
2298 pch->active = false;
2299
2300 /* Mark all desc done */
2301 list_for_each_entry(desc, &pch->submitted_list, node) {
2302 desc->status = FREE;
2303 dma_cookie_complete(&desc->txd);
2304 }
2305
	list_for_each_entry(desc, &pch->work_list, node) {
2307 desc->status = FREE;
2308 dma_cookie_complete(&desc->txd);
2309 }
2310
2311 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
2312 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2313 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2314 spin_unlock_irqrestore(&pch->lock, flags);
2315 pm_runtime_mark_last_busy(pl330->ddma.dev);
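	/*
	 * Drop the usage count taken in pl330_issue_pending() if the
	 * channel was active, plus the one taken by pm_runtime_get_sync()
	 * above.
	 */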
2316 if (power_down)
2317 pm_runtime_put_autosuspend(pl330->ddma.dev);
2318 pm_runtime_put_autosuspend(pl330->ddma.dev);
2319
2320 return 0;
2321}
2322
/*
 * We don't support the DMA_RESUME command because of hardware
 * limitations: once the channel is paused we cannot restore it to the
 * active state, so the channel must be terminated and the DMA transfer
 * set up again. This pause feature exists to allow safely reading the
 * residue before terminating the channel.
 */
2330static int pl330_pause(struct dma_chan *chan)
2331{
2332 struct dma_pl330_chan *pch = to_pchan(chan);
2333 struct pl330_dmac *pl330 = pch->dmac;
2334 unsigned long flags;
2335
2336 pm_runtime_get_sync(pl330->ddma.dev);
2337 spin_lock_irqsave(&pch->lock, flags);
2338
2339 spin_lock(&pl330->lock);
2340 _stop(pch->thread);
2341 spin_unlock(&pl330->lock);
2342
2343 spin_unlock_irqrestore(&pch->lock, flags);
2344 pm_runtime_mark_last_busy(pl330->ddma.dev);
2345 pm_runtime_put_autosuspend(pl330->ddma.dev);
2346
2347 return 0;
2348}
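
/*
 * Typical client sequence (sketch): pause, read the residue, then
 * terminate, since resume is not supported:
 *
 *	dmaengine_pause(chan);
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	dmaengine_terminate_all(chan);
 */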
2349
2350static void pl330_free_chan_resources(struct dma_chan *chan)
2351{
2352 struct dma_pl330_chan *pch = to_pchan(chan);
2353 struct pl330_dmac *pl330 = pch->dmac;
2354 unsigned long flags;
2355
2356 tasklet_kill(&pch->task);
2357
2358 pm_runtime_get_sync(pch->dmac->ddma.dev);
2359 spin_lock_irqsave(&pl330->lock, flags);
2360
2361 pl330_release_channel(pch->thread);
2362 pch->thread = NULL;
2363
2364 if (pch->cyclic)
2365 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
2366
2367 spin_unlock_irqrestore(&pl330->lock, flags);
2368 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2369 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2370 pl330_unprep_slave_fifo(pch);
2371}
2372
2373static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
2374 struct dma_pl330_desc *desc)
2375{
2376 struct pl330_thread *thrd = pch->thread;
2377 struct pl330_dmac *pl330 = pch->dmac;
2378 void __iomem *regs = thrd->dmac->base;
2379 u32 val, addr;
2380
2381 pm_runtime_get_sync(pl330->ddma.dev);
2382 val = addr = 0;
2383 if (desc->rqcfg.src_inc) {
2384 val = readl(regs + SA(thrd->id));
2385 addr = desc->px.src_addr;
2386 } else {
2387 val = readl(regs + DA(thrd->id));
2388 addr = desc->px.dst_addr;
2389 }
2390 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2391 pm_runtime_put_autosuspend(pl330->ddma.dev);
2392
2393 /* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
2394 if (!val)
2395 return 0;
2396
2397 return val - addr;
2398}
2399
2400static enum dma_status
2401pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2402 struct dma_tx_state *txstate)
2403{
2404 enum dma_status ret;
2405 unsigned long flags;
2406 struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
2407 struct dma_pl330_chan *pch = to_pchan(chan);
2408 unsigned int transferred, residual = 0;
2409
2410 ret = dma_cookie_status(chan, cookie, txstate);
2411
2412 if (!txstate)
2413 return ret;
2414
2415 if (ret == DMA_COMPLETE)
2416 goto out;
2417
2418 spin_lock_irqsave(&pch->lock, flags);
2419 spin_lock(&pch->thread->dmac->lock);
2420
2421 if (pch->thread->req_running != -1)
2422 running = pch->thread->req[pch->thread->req_running].desc;
2423
2424 last_enq = pch->thread->req[pch->thread->lstenq].desc;
2425
2426 /* Check in pending list */
2427 list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == DONE) {
			transferred = desc->bytes_requested;
		} else if (running && desc == running) {
			transferred =
				pl330_get_current_xferred_count(pch, desc);
		} else if (desc->status == BUSY) {
			/*
			 * Busy but not running means either just enqueued,
			 * or finished and not yet marked done.
			 */
			if (desc == last_enq)
				transferred = 0;
			else
				transferred = desc->bytes_requested;
		} else {
			transferred = 0;
		}
2444 residual += desc->bytes_requested - transferred;
2445 if (desc->txd.cookie == cookie) {
2446 switch (desc->status) {
2447 case DONE:
2448 ret = DMA_COMPLETE;
2449 break;
2450 case PREP:
2451 case BUSY:
2452 ret = DMA_IN_PROGRESS;
2453 break;
2454 default:
2455 WARN_ON(1);
2456 }
2457 break;
2458 }
2459 if (desc->last)
2460 residual = 0;
2461 }
2462 spin_unlock(&pch->thread->dmac->lock);
2463 spin_unlock_irqrestore(&pch->lock, flags);
2464
2465out:
2466 dma_set_residue(txstate, residual);
2467
2468 return ret;
2469}
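
/*
 * Worked residue example (hypothetical numbers): three 1024-byte
 * descriptors where the first is DONE, the second is running with 256
 * bytes moved and the third is BUSY but only enqueued; querying the
 * third cookie accumulates (1024 - 1024) + (1024 - 256) + (1024 - 0) =
 * 1792 bytes of residue.
 */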
2470
2471static void pl330_issue_pending(struct dma_chan *chan)
2472{
2473 struct dma_pl330_chan *pch = to_pchan(chan);
2474 unsigned long flags;
2475
2476 spin_lock_irqsave(&pch->lock, flags);
2477 if (list_empty(&pch->work_list)) {
		/*
		 * Warn if nothing is pending: an empty submitted_list may
		 * unbalance our pm_runtime usage count, which is updated
		 * based on whether work_list is empty.
		 */
2483 WARN_ON(list_empty(&pch->submitted_list));
2484 pch->active = true;
2485 pm_runtime_get_sync(pch->dmac->ddma.dev);
2486 }
2487 list_splice_tail_init(&pch->submitted_list, &pch->work_list);
2488 spin_unlock_irqrestore(&pch->lock, flags);
2489
2490 pl330_tasklet((unsigned long)pch);
2491}
2492
/*
 * prep_xxx returned the last descriptor of the circular list, so the
 * argument passed to submit corresponds to the last descriptor of the
 * list.
 */
2498static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2499{
2500 struct dma_pl330_desc *desc, *last = to_desc(tx);
2501 struct dma_pl330_chan *pch = to_pchan(tx->chan);
2502 dma_cookie_t cookie;
2503 unsigned long flags;
2504
2505 spin_lock_irqsave(&pch->lock, flags);
2506
2507 /* Assign cookies to all nodes */
2508 while (!list_empty(&last->node)) {
2509 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
2510 if (pch->cyclic) {
2511 desc->txd.callback = last->txd.callback;
2512 desc->txd.callback_param = last->txd.callback_param;
2513 }
2514 desc->last = false;
2515
2516 dma_cookie_assign(&desc->txd);
2517
2518 list_move_tail(&desc->node, &pch->submitted_list);
2519 }
2520
2521 last->last = true;
2522 cookie = dma_cookie_assign(&last->txd);
2523 list_add_tail(&last->node, &pch->submitted_list);
2524 spin_unlock_irqrestore(&pch->lock, flags);
2525
2526 return cookie;
2527}
2528
2529static inline void _init_desc(struct dma_pl330_desc *desc)
2530{
2531 desc->rqcfg.swap = SWAP_NO;
2532 desc->rqcfg.scctl = CCTRL0;
2533 desc->rqcfg.dcctl = CCTRL0;
2534 desc->txd.tx_submit = pl330_tx_submit;
2535
2536 INIT_LIST_HEAD(&desc->node);
2537}
2538
2539/* Returns the number of descriptors added to the DMAC pool */
2540static int add_desc(struct list_head *pool, spinlock_t *lock,
2541 gfp_t flg, int count)
2542{
2543 struct dma_pl330_desc *desc;
2544 unsigned long flags;
2545 int i;
2546
2547 desc = kcalloc(count, sizeof(*desc), flg);
2548 if (!desc)
2549 return 0;
2550
2551 spin_lock_irqsave(lock, flags);
2552
2553 for (i = 0; i < count; i++) {
2554 _init_desc(&desc[i]);
2555 list_add_tail(&desc[i].node, pool);
2556 }
2557
2558 spin_unlock_irqrestore(lock, flags);
2559
2560 return count;
2561}
2562
2563static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
2564 spinlock_t *lock)
2565{
2566 struct dma_pl330_desc *desc = NULL;
2567 unsigned long flags;
2568
2569 spin_lock_irqsave(lock, flags);
2570
2571 if (!list_empty(pool)) {
2572 desc = list_entry(pool->next,
2573 struct dma_pl330_desc, node);
2574
2575 list_del_init(&desc->node);
2576
2577 desc->status = PREP;
2578 desc->txd.callback = NULL;
2579 }
2580
2581 spin_unlock_irqrestore(lock, flags);
2582
2583 return desc;
2584}
2585
2586static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
2587{
2588 struct pl330_dmac *pl330 = pch->dmac;
2589 u8 *peri_id = pch->chan.private;
2590 struct dma_pl330_desc *desc;
2591
	/* Pluck one desc from the DMAC's pool */
2593 desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);
2594
2595 /* If the DMAC pool is empty, alloc new */
2596 if (!desc) {
2597 DEFINE_SPINLOCK(lock);
2598 LIST_HEAD(pool);
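		/*
		 * A transient on-stack pool lets us reuse add_desc() and
		 * pluck_desc(), so the fallback descriptor is initialized
		 * exactly like pooled ones.
		 */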
2599
2600 if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
2601 return NULL;
2602
2603 desc = pluck_desc(&pool, &lock);
2604 WARN_ON(!desc || !list_empty(&pool));
2605 }
2606
2607 /* Initialize the descriptor */
2608 desc->pchan = pch;
2609 desc->txd.cookie = 0;
2610 async_tx_ack(&desc->txd);
2611
2612 desc->peri = peri_id ? pch->chan.chan_id : 0;
2613 desc->rqcfg.pcfg = &pch->dmac->pcfg;
2614
2615 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
2616
2617 return desc;
2618}
2619
2620static inline void fill_px(struct pl330_xfer *px,
2621 dma_addr_t dst, dma_addr_t src, size_t len)
2622{
2623 px->bytes = len;
2624 px->dst_addr = dst;
2625 px->src_addr = src;
2626}
2627
2628static struct dma_pl330_desc *
2629__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
2630 dma_addr_t src, size_t len)
2631{
2632 struct dma_pl330_desc *desc = pl330_get_desc(pch);
2633
2634 if (!desc) {
2635 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
2636 __func__, __LINE__);
2637 return NULL;
2638 }
2639
	/*
	 * Ideally we should look out for reqs bigger than those that
	 * can be programmed with 256 bytes of MC buffer, but considering
	 * a req size is seldom going to be word-unaligned and more than
	 * 200MB, we take it easy.
	 * Also, should the limit be reached we'd rather have the
	 * platform increase the MC buffer size than complicate this
	 * API driver.
	 */
2650 fill_px(&desc->px, dst, src, len);
2651
2652 return desc;
2653}
2654
2655/* Call after fixing burst size */
2656static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
2657{
2658 struct dma_pl330_chan *pch = desc->pchan;
2659 struct pl330_dmac *pl330 = pch->dmac;
2660 int burst_len;
2661
2662 burst_len = pl330->pcfg.data_bus_width / 8;
2663 burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
2664 burst_len >>= desc->rqcfg.brst_size;
2665
2666 /* src/dst_burst_len can't be more than 16 */
2667 if (burst_len > PL330_MAX_BURST)
2668 burst_len = PL330_MAX_BURST;
2669
2670 return burst_len;
2671}
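
/*
 * Worked example (hypothetical config): an 8-byte data bus with a
 * 16-line MFIFO shared by 8 channels gives 8 * (16 / 8) = 16 bytes per
 * channel; with brst_size = 2 (4-byte beats) that is 16 >> 2 = 4 beats
 * per burst, within the PL330_MAX_BURST cap.
 */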
2672
2673static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2674 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
2675 size_t period_len, enum dma_transfer_direction direction,
2676 unsigned long flags)
2677{
2678 struct dma_pl330_desc *desc = NULL, *first = NULL;
2679 struct dma_pl330_chan *pch = to_pchan(chan);
2680 struct pl330_dmac *pl330 = pch->dmac;
2681 unsigned int i;
2682 dma_addr_t dst;
2683 dma_addr_t src;
2684
2685 if (len % period_len != 0)
2686 return NULL;
2687
2688 if (!is_slave_direction(direction)) {
2689 dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
2690 __func__, __LINE__);
2691 return NULL;
2692 }
2693
2694 pl330_config_write(chan, &pch->slave_config, direction);
2695
2696 if (!pl330_prep_slave_fifo(pch, direction))
2697 return NULL;
2698
2699 for (i = 0; i < len / period_len; i++) {
2700 desc = pl330_get_desc(pch);
2701 if (!desc) {
2702 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
2703 __func__, __LINE__);
2704
2705 if (!first)
2706 return NULL;
2707
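			/*
			 * Reusing 'flags' (the txd flags argument) as the
			 * irqsave cookie is safe only because this error
			 * path returns before the flags are consumed below.
			 */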
2708 spin_lock_irqsave(&pl330->pool_lock, flags);
2709
2710 while (!list_empty(&first->node)) {
2711 desc = list_entry(first->node.next,
2712 struct dma_pl330_desc, node);
2713 list_move_tail(&desc->node, &pl330->desc_pool);
2714 }
2715
2716 list_move_tail(&first->node, &pl330->desc_pool);
2717
2718 spin_unlock_irqrestore(&pl330->pool_lock, flags);
2719
2720 return NULL;
2721 }
2722
2723 switch (direction) {
2724 case DMA_MEM_TO_DEV:
2725 desc->rqcfg.src_inc = 1;
2726 desc->rqcfg.dst_inc = 0;
2727 src = dma_addr;
2728 dst = pch->fifo_dma;
2729 break;
2730 case DMA_DEV_TO_MEM:
2731 desc->rqcfg.src_inc = 0;
2732 desc->rqcfg.dst_inc = 1;
2733 src = pch->fifo_dma;
2734 dst = dma_addr;
2735 break;
2736 default:
2737 break;
2738 }
2739
2740 desc->rqtype = direction;
2741 desc->rqcfg.brst_size = pch->burst_sz;
2742 desc->rqcfg.brst_len = pch->burst_len;
2743 desc->bytes_requested = period_len;
2744 fill_px(&desc->px, dst, src, period_len);
2745
2746 if (!first)
2747 first = desc;
2748 else
2749 list_add_tail(&desc->node, &first->node);
2750
2751 dma_addr += period_len;
2752 }
2753
2754 if (!desc)
2755 return NULL;
2756
2757 pch->cyclic = true;
2758 desc->txd.flags = flags;
2759
2760 return &desc->txd;
2761}
2762
2763static struct dma_async_tx_descriptor *
2764pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2765 dma_addr_t src, size_t len, unsigned long flags)
2766{
2767 struct dma_pl330_desc *desc;
2768 struct dma_pl330_chan *pch = to_pchan(chan);
2769 struct pl330_dmac *pl330;
2770 int burst;
2771
2772 if (unlikely(!pch || !len))
2773 return NULL;
2774
2775 pl330 = pch->dmac;
2776
2777 desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
2778 if (!desc)
2779 return NULL;
2780
2781 desc->rqcfg.src_inc = 1;
2782 desc->rqcfg.dst_inc = 1;
2783 desc->rqtype = DMA_MEM_TO_MEM;
2784
2785 /* Select max possible burst size */
2786 burst = pl330->pcfg.data_bus_width / 8;
2787
2788 /*
2789 * Make sure we use a burst size that aligns with all the memcpy
2790 * parameters because our DMA programming algorithm doesn't cope with
2791 * transfers which straddle an entry in the DMA device's MFIFO.
2792 */
2793 while ((src | dst | len) & (burst - 1))
2794 burst /= 2;
2795
2796 desc->rqcfg.brst_size = 0;
2797 while (burst != (1 << desc->rqcfg.brst_size))
2798 desc->rqcfg.brst_size++;
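	/*
	 * Worked example (hypothetical addresses): with a 64-bit bus,
	 * burst starts at 8; for src = 0x1004, dst = 0x2000, len = 0x100,
	 * (src | dst | len) = 0x3104, so burst halves once to 4 and
	 * brst_size ends up as 2 (4 == 1 << 2).
	 */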
2799
2800 desc->rqcfg.brst_len = get_burst_len(desc, len);
	/*
	 * If the burst size is smaller than the bus width then make sure we
	 * only transfer one at a time to avoid a burst straddling an MFIFO
	 * entry.
	 */
2805 if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
2806 desc->rqcfg.brst_len = 1;
2807
2808 desc->bytes_requested = len;
2809
2810 desc->txd.flags = flags;
2811
2812 return &desc->txd;
2813}
2814
2815static void __pl330_giveback_desc(struct pl330_dmac *pl330,
2816 struct dma_pl330_desc *first)
2817{
2818 unsigned long flags;
2819 struct dma_pl330_desc *desc;
2820
2821 if (!first)
2822 return;
2823
2824 spin_lock_irqsave(&pl330->pool_lock, flags);
2825
2826 while (!list_empty(&first->node)) {
2827 desc = list_entry(first->node.next,
2828 struct dma_pl330_desc, node);
2829 list_move_tail(&desc->node, &pl330->desc_pool);
2830 }
2831
2832 list_move_tail(&first->node, &pl330->desc_pool);
2833
2834 spin_unlock_irqrestore(&pl330->pool_lock, flags);
2835}
2836
2837static struct dma_async_tx_descriptor *
2838pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2839 unsigned int sg_len, enum dma_transfer_direction direction,
2840 unsigned long flg, void *context)
2841{
2842 struct dma_pl330_desc *first, *desc = NULL;
2843 struct dma_pl330_chan *pch = to_pchan(chan);
2844 struct scatterlist *sg;
2845 int i;
2846
2847 if (unlikely(!pch || !sgl || !sg_len))
2848 return NULL;
2849
2850 pl330_config_write(chan, &pch->slave_config, direction);
2851
2852 if (!pl330_prep_slave_fifo(pch, direction))
2853 return NULL;
2854
2855 first = NULL;
2856
2857 for_each_sg(sgl, sg, sg_len, i) {
2858
2859 desc = pl330_get_desc(pch);
2860 if (!desc) {
2861 struct pl330_dmac *pl330 = pch->dmac;
2862
2863 dev_err(pch->dmac->ddma.dev,
2864 "%s:%d Unable to fetch desc\n",
2865 __func__, __LINE__);
2866 __pl330_giveback_desc(pl330, first);
2867
2868 return NULL;
2869 }
2870
2871 if (!first)
2872 first = desc;
2873 else
2874 list_add_tail(&desc->node, &first->node);
2875
2876 if (direction == DMA_MEM_TO_DEV) {
2877 desc->rqcfg.src_inc = 1;
2878 desc->rqcfg.dst_inc = 0;
2879 fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
2880 sg_dma_len(sg));
2881 } else {
2882 desc->rqcfg.src_inc = 0;
2883 desc->rqcfg.dst_inc = 1;
2884 fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
2885 sg_dma_len(sg));
2886 }
2887
2888 desc->rqcfg.brst_size = pch->burst_sz;
2889 desc->rqcfg.brst_len = pch->burst_len;
2890 desc->rqtype = direction;
2891 desc->bytes_requested = sg_dma_len(sg);
2892 }
2893
2894 /* Return the last desc in the chain */
2895 desc->txd.flags = flg;
2896 return &desc->txd;
2897}
2898
2899static irqreturn_t pl330_irq_handler(int irq, void *data)
2900{
2901 if (pl330_update(data))
2902 return IRQ_HANDLED;
2903 else
2904 return IRQ_NONE;
2905}
2906
2907#define PL330_DMA_BUSWIDTHS \
2908 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
2909 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
2910 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
2911 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
2912 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
2913
2914#ifdef CONFIG_DEBUG_FS
2915static int pl330_debugfs_show(struct seq_file *s, void *data)
2916{
2917 struct pl330_dmac *pl330 = s->private;
2918 int chans, pchs, ch, pr;
2919
2920 chans = pl330->pcfg.num_chan;
2921 pchs = pl330->num_peripherals;
2922
2923 seq_puts(s, "PL330 physical channels:\n");
2924 seq_puts(s, "THREAD:\t\tCHANNEL:\n");
2925 seq_puts(s, "--------\t-----\n");
2926 for (ch = 0; ch < chans; ch++) {
2927 struct pl330_thread *thrd = &pl330->channels[ch];
2928 int found = -1;
2929
2930 for (pr = 0; pr < pchs; pr++) {
2931 struct dma_pl330_chan *pch = &pl330->peripherals[pr];
2932
2933 if (!pch->thread || thrd->id != pch->thread->id)
2934 continue;
2935
2936 found = pr;
2937 }
2938
2939 seq_printf(s, "%d\t\t", thrd->id);
2940 if (found == -1)
2941 seq_puts(s, "--\n");
2942 else
2943 seq_printf(s, "%d\n", found);
2944 }
2945
2946 return 0;
2947}
2948
2949DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);
2950
2951static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
2952{
2953 debugfs_create_file(dev_name(pl330->ddma.dev),
2954 S_IFREG | 0444, NULL, pl330,
2955 &pl330_debugfs_fops);
2956}
2957#else
2958static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
2959{
2960}
2961#endif
2962
/*
 * Runtime PM callbacks are provided by the amba/bus.c driver.
 *
 * It is assumed here that IRQ-safe runtime PM is chosen in probe and that
 * the amba bus driver will only disable/enable the clock in its runtime PM
 * callbacks.
 */
2969static int __maybe_unused pl330_suspend(struct device *dev)
2970{
2971 struct amba_device *pcdev = to_amba_device(dev);
2972
2973 pm_runtime_force_suspend(dev);
2974 amba_pclk_unprepare(pcdev);
2975
2976 return 0;
2977}
2978
2979static int __maybe_unused pl330_resume(struct device *dev)
2980{
2981 struct amba_device *pcdev = to_amba_device(dev);
2982 int ret;
2983
2984 ret = amba_pclk_prepare(pcdev);
2985 if (ret)
2986 return ret;
2987
2988 pm_runtime_force_resume(dev);
2989
2990 return ret;
2991}
2992
2993static const struct dev_pm_ops pl330_pm = {
2994 SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
2995};
2996
2997static int
2998pl330_probe(struct amba_device *adev, const struct amba_id *id)
2999{
3000 struct pl330_config *pcfg;
3001 struct pl330_dmac *pl330;
3002 struct dma_pl330_chan *pch, *_p;
3003 struct dma_device *pd;
3004 struct resource *res;
3005 int i, ret, irq;
3006 int num_chan;
3007 struct device_node *np = adev->dev.of_node;
3008
3009 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
3010 if (ret)
3011 return ret;
3012
3013 /* Allocate a new DMAC and its Channels */
3014 pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
3015 if (!pl330)
3016 return -ENOMEM;
3017
3018 pd = &pl330->ddma;
3019 pd->dev = &adev->dev;
3020
3021 pl330->mcbufsz = 0;
3022
3023 /* get quirk */
3024 for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
3025 if (of_property_read_bool(np, of_quirks[i].quirk))
3026 pl330->quirks |= of_quirks[i].id;
3027
3028 res = &adev->res;
3029 pl330->base = devm_ioremap_resource(&adev->dev, res);
3030 if (IS_ERR(pl330->base))
3031 return PTR_ERR(pl330->base);
3032
3033 amba_set_drvdata(adev, pl330);
3034
3035 pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
3036 if (IS_ERR(pl330->rstc)) {
3037 if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER)
3038 dev_err(&adev->dev, "Failed to get reset!\n");
3039 return PTR_ERR(pl330->rstc);
3040 } else {
3041 ret = reset_control_deassert(pl330->rstc);
3042 if (ret) {
3043 dev_err(&adev->dev, "Couldn't deassert the device from reset!\n");
3044 return ret;
3045 }
3046 }
3047
3048 pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
3049 if (IS_ERR(pl330->rstc_ocp)) {
3050 if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER)
3051 dev_err(&adev->dev, "Failed to get OCP reset!\n");
3052 return PTR_ERR(pl330->rstc_ocp);
3053 } else {
3054 ret = reset_control_deassert(pl330->rstc_ocp);
3055 if (ret) {
3056 dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n");
3057 return ret;
3058 }
3059 }
3060
3061 for (i = 0; i < AMBA_NR_IRQS; i++) {
3062 irq = adev->irq[i];
3063 if (irq) {
3064 ret = devm_request_irq(&adev->dev, irq,
3065 pl330_irq_handler, 0,
3066 dev_name(&adev->dev), pl330);
3067 if (ret)
3068 return ret;
3069 } else {
3070 break;
3071 }
3072 }
3073
3074 pcfg = &pl330->pcfg;
3075
3076 pcfg->periph_id = adev->periphid;
3077 ret = pl330_add(pl330);
3078 if (ret)
3079 return ret;
3080
3081 INIT_LIST_HEAD(&pl330->desc_pool);
3082 spin_lock_init(&pl330->pool_lock);
3083
3084 /* Create a descriptor pool of default size */
3085 if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
3086 GFP_KERNEL, NR_DEFAULT_DESC))
3087 dev_warn(&adev->dev, "unable to allocate desc\n");
3088
3089 INIT_LIST_HEAD(&pd->channels);
3090
3091 /* Initialize channel parameters */
3092 num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
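	/*
	 * One dma_chan is exposed per peripheral request interface, but
	 * never fewer than the number of physical channels, so purely
	 * memory-to-memory configurations still get usable channels.
	 */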
3093
3094 pl330->num_peripherals = num_chan;
3095
3096 pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL);
3097 if (!pl330->peripherals) {
3098 ret = -ENOMEM;
3099 goto probe_err2;
3100 }
3101
3102 for (i = 0; i < num_chan; i++) {
3103 pch = &pl330->peripherals[i];
3104
3105 pch->chan.private = adev->dev.of_node;
3106 INIT_LIST_HEAD(&pch->submitted_list);
3107 INIT_LIST_HEAD(&pch->work_list);
3108 INIT_LIST_HEAD(&pch->completed_list);
3109 spin_lock_init(&pch->lock);
3110 pch->thread = NULL;
3111 pch->chan.device = pd;
3112 pch->dmac = pl330;
3113 pch->dir = DMA_NONE;
3114
3115 /* Add the channel to the DMAC list */
3116 list_add_tail(&pch->chan.device_node, &pd->channels);
3117 }
3118
3119 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
3120 if (pcfg->num_peri) {
3121 dma_cap_set(DMA_SLAVE, pd->cap_mask);
3122 dma_cap_set(DMA_CYCLIC, pd->cap_mask);
3123 dma_cap_set(DMA_PRIVATE, pd->cap_mask);
3124 }
3125
3126 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
3127 pd->device_free_chan_resources = pl330_free_chan_resources;
3128 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
3129 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
3130 pd->device_tx_status = pl330_tx_status;
3131 pd->device_prep_slave_sg = pl330_prep_slave_sg;
3132 pd->device_config = pl330_config;
3133 pd->device_pause = pl330_pause;
3134 pd->device_terminate_all = pl330_terminate_all;
3135 pd->device_issue_pending = pl330_issue_pending;
3136 pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
3137 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
3138 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3139 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3140 pd->max_burst = PL330_MAX_BURST;
3141
3142 ret = dma_async_device_register(pd);
3143 if (ret) {
3144 dev_err(&adev->dev, "unable to register DMAC\n");
3145 goto probe_err3;
3146 }
3147
3148 if (adev->dev.of_node) {
3149 ret = of_dma_controller_register(adev->dev.of_node,
3150 of_dma_pl330_xlate, pl330);
3151 if (ret) {
3152 dev_err(&adev->dev,
3153 "unable to register DMA to the generic DT DMA helpers\n");
3154 }
3155 }
3156
3157 adev->dev.dma_parms = &pl330->dma_parms;
3158
	/*
	 * This is the limit for transfers with a buswidth of 1; larger
	 * buswidths will have larger limits.
	 */
3163 ret = dma_set_max_seg_size(&adev->dev, 1900800);
3164 if (ret)
3165 dev_err(&adev->dev, "unable to set the seg size\n");
3168 init_pl330_debugfs(pl330);
3169 dev_info(&adev->dev,
3170 "Loaded driver for PL330 DMAC-%x\n", adev->periphid);
3171 dev_info(&adev->dev,
3172 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
3173 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
3174 pcfg->num_peri, pcfg->num_events);
3175
3176 pm_runtime_irq_safe(&adev->dev);
3177 pm_runtime_use_autosuspend(&adev->dev);
3178 pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
3179 pm_runtime_mark_last_busy(&adev->dev);
3180 pm_runtime_put_autosuspend(&adev->dev);
3181
3182 return 0;
3183probe_err3:
3184 /* Idle the DMAC */
3185 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
3186 chan.device_node) {
3187
3188 /* Remove the channel */
3189 list_del(&pch->chan.device_node);
3190
3191 /* Flush the channel */
3192 if (pch->thread) {
3193 pl330_terminate_all(&pch->chan);
3194 pl330_free_chan_resources(&pch->chan);
3195 }
3196 }
3197probe_err2:
3198 pl330_del(pl330);
3199
3200 if (pl330->rstc_ocp)
3201 reset_control_assert(pl330->rstc_ocp);
3202
3203 if (pl330->rstc)
3204 reset_control_assert(pl330->rstc);
3205 return ret;
3206}
3207
3208static int pl330_remove(struct amba_device *adev)
3209{
3210 struct pl330_dmac *pl330 = amba_get_drvdata(adev);
3211 struct dma_pl330_chan *pch, *_p;
3212 int i, irq;
3213
3214 pm_runtime_get_noresume(pl330->ddma.dev);
3215
3216 if (adev->dev.of_node)
3217 of_dma_controller_free(adev->dev.of_node);
3218
3219 for (i = 0; i < AMBA_NR_IRQS; i++) {
3220 irq = adev->irq[i];
3221 if (irq)
3222 devm_free_irq(&adev->dev, irq, pl330);
3223 }
3224
3225 dma_async_device_unregister(&pl330->ddma);
3226
3227 /* Idle the DMAC */
3228 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
3229 chan.device_node) {
3230
3231 /* Remove the channel */
3232 list_del(&pch->chan.device_node);
3233
3234 /* Flush the channel */
3235 if (pch->thread) {
3236 pl330_terminate_all(&pch->chan);
3237 pl330_free_chan_resources(&pch->chan);
3238 }
3239 }
3240
3241 pl330_del(pl330);
3242
3243 if (pl330->rstc_ocp)
3244 reset_control_assert(pl330->rstc_ocp);
3245
3246 if (pl330->rstc)
3247 reset_control_assert(pl330->rstc);
3248 return 0;
3249}
3250
3251static const struct amba_id pl330_ids[] = {
3252 {
3253 .id = 0x00041330,
3254 .mask = 0x000fffff,
3255 },
3256 { 0, 0 },
3257};
3258
3259MODULE_DEVICE_TABLE(amba, pl330_ids);
3260
3261static struct amba_driver pl330_driver = {
3262 .drv = {
3263 .owner = THIS_MODULE,
3264 .name = "dma-pl330",
3265 .pm = &pl330_pm,
3266 },
3267 .id_table = pl330_ids,
3268 .probe = pl330_probe,
3269 .remove = pl330_remove,
3270};
3271
3272module_amba_driver(pl330_driver);
3273
3274MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
3275MODULE_DESCRIPTION("API Driver for PL330 DMAC");
3276MODULE_LICENSE("GPL");
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
6 * Jaswinder Singh <jassi.brar@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/io.h>
16#include <linux/init.h>
17#include <linux/slab.h>
18#include <linux/module.h>
19#include <linux/string.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/dma-mapping.h>
23#include <linux/dmaengine.h>
24#include <linux/amba/bus.h>
25#include <linux/scatterlist.h>
26#include <linux/of.h>
27#include <linux/of_dma.h>
28#include <linux/err.h>
29#include <linux/pm_runtime.h>
30
31#include "dmaengine.h"
32#define PL330_MAX_CHAN 8
33#define PL330_MAX_IRQS 32
34#define PL330_MAX_PERI 32
35#define PL330_MAX_BURST 16
36
37#define PL330_QUIRK_BROKEN_NO_FLUSHP BIT(0)
38
39enum pl330_cachectrl {
40 CCTRL0, /* Noncacheable and nonbufferable */
41 CCTRL1, /* Bufferable only */
42 CCTRL2, /* Cacheable, but do not allocate */
43 CCTRL3, /* Cacheable and bufferable, but do not allocate */
44 INVALID1, /* AWCACHE = 0x1000 */
45 INVALID2,
46 CCTRL6, /* Cacheable write-through, allocate on writes only */
47 CCTRL7, /* Cacheable write-back, allocate on writes only */
48};
49
50enum pl330_byteswap {
51 SWAP_NO,
52 SWAP_2,
53 SWAP_4,
54 SWAP_8,
55 SWAP_16,
56};
57
58/* Register and Bit field Definitions */
59#define DS 0x0
60#define DS_ST_STOP 0x0
61#define DS_ST_EXEC 0x1
62#define DS_ST_CMISS 0x2
63#define DS_ST_UPDTPC 0x3
64#define DS_ST_WFE 0x4
65#define DS_ST_ATBRR 0x5
66#define DS_ST_QBUSY 0x6
67#define DS_ST_WFP 0x7
68#define DS_ST_KILL 0x8
69#define DS_ST_CMPLT 0x9
70#define DS_ST_FLTCMP 0xe
71#define DS_ST_FAULT 0xf
72
73#define DPC 0x4
74#define INTEN 0x20
75#define ES 0x24
76#define INTSTATUS 0x28
77#define INTCLR 0x2c
78#define FSM 0x30
79#define FSC 0x34
80#define FTM 0x38
81
82#define _FTC 0x40
83#define FTC(n) (_FTC + (n)*0x4)
84
85#define _CS 0x100
86#define CS(n) (_CS + (n)*0x8)
87#define CS_CNS (1 << 21)
88
89#define _CPC 0x104
90#define CPC(n) (_CPC + (n)*0x8)
91
92#define _SA 0x400
93#define SA(n) (_SA + (n)*0x20)
94
95#define _DA 0x404
96#define DA(n) (_DA + (n)*0x20)
97
98#define _CC 0x408
99#define CC(n) (_CC + (n)*0x20)
100
101#define CC_SRCINC (1 << 0)
102#define CC_DSTINC (1 << 14)
103#define CC_SRCPRI (1 << 8)
104#define CC_DSTPRI (1 << 22)
105#define CC_SRCNS (1 << 9)
106#define CC_DSTNS (1 << 23)
107#define CC_SRCIA (1 << 10)
108#define CC_DSTIA (1 << 24)
109#define CC_SRCBRSTLEN_SHFT 4
110#define CC_DSTBRSTLEN_SHFT 18
111#define CC_SRCBRSTSIZE_SHFT 1
112#define CC_DSTBRSTSIZE_SHFT 15
113#define CC_SRCCCTRL_SHFT 11
114#define CC_SRCCCTRL_MASK 0x7
115#define CC_DSTCCTRL_SHFT 25
116#define CC_DRCCCTRL_MASK 0x7
117#define CC_SWAP_SHFT 28
118
119#define _LC0 0x40c
120#define LC0(n) (_LC0 + (n)*0x20)
121
122#define _LC1 0x410
123#define LC1(n) (_LC1 + (n)*0x20)
124
125#define DBGSTATUS 0xd00
126#define DBG_BUSY (1 << 0)
127
128#define DBGCMD 0xd04
129#define DBGINST0 0xd08
130#define DBGINST1 0xd0c
131
132#define CR0 0xe00
133#define CR1 0xe04
134#define CR2 0xe08
135#define CR3 0xe0c
136#define CR4 0xe10
137#define CRD 0xe14
138
139#define PERIPH_ID 0xfe0
140#define PERIPH_REV_SHIFT 20
141#define PERIPH_REV_MASK 0xf
142#define PERIPH_REV_R0P0 0
143#define PERIPH_REV_R1P0 1
144#define PERIPH_REV_R1P1 2
145
146#define CR0_PERIPH_REQ_SET (1 << 0)
147#define CR0_BOOT_EN_SET (1 << 1)
148#define CR0_BOOT_MAN_NS (1 << 2)
149#define CR0_NUM_CHANS_SHIFT 4
150#define CR0_NUM_CHANS_MASK 0x7
151#define CR0_NUM_PERIPH_SHIFT 12
152#define CR0_NUM_PERIPH_MASK 0x1f
153#define CR0_NUM_EVENTS_SHIFT 17
154#define CR0_NUM_EVENTS_MASK 0x1f
155
156#define CR1_ICACHE_LEN_SHIFT 0
157#define CR1_ICACHE_LEN_MASK 0x7
158#define CR1_NUM_ICACHELINES_SHIFT 4
159#define CR1_NUM_ICACHELINES_MASK 0xf
160
161#define CRD_DATA_WIDTH_SHIFT 0
162#define CRD_DATA_WIDTH_MASK 0x7
163#define CRD_WR_CAP_SHIFT 4
164#define CRD_WR_CAP_MASK 0x7
165#define CRD_WR_Q_DEP_SHIFT 8
166#define CRD_WR_Q_DEP_MASK 0xf
167#define CRD_RD_CAP_SHIFT 12
168#define CRD_RD_CAP_MASK 0x7
169#define CRD_RD_Q_DEP_SHIFT 16
170#define CRD_RD_Q_DEP_MASK 0xf
171#define CRD_DATA_BUFF_SHIFT 20
172#define CRD_DATA_BUFF_MASK 0x3ff
173
174#define PART 0x330
175#define DESIGNER 0x41
176#define REVISION 0x0
177#define INTEG_CFG 0x0
178#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
179
180#define PL330_STATE_STOPPED (1 << 0)
181#define PL330_STATE_EXECUTING (1 << 1)
182#define PL330_STATE_WFE (1 << 2)
183#define PL330_STATE_FAULTING (1 << 3)
184#define PL330_STATE_COMPLETING (1 << 4)
185#define PL330_STATE_WFP (1 << 5)
186#define PL330_STATE_KILLING (1 << 6)
187#define PL330_STATE_FAULT_COMPLETING (1 << 7)
188#define PL330_STATE_CACHEMISS (1 << 8)
189#define PL330_STATE_UPDTPC (1 << 9)
190#define PL330_STATE_ATBARRIER (1 << 10)
191#define PL330_STATE_QUEUEBUSY (1 << 11)
192#define PL330_STATE_INVALID (1 << 15)
193
194#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
195 | PL330_STATE_WFE | PL330_STATE_FAULTING)
196
197#define CMD_DMAADDH 0x54
198#define CMD_DMAEND 0x00
199#define CMD_DMAFLUSHP 0x35
200#define CMD_DMAGO 0xa0
201#define CMD_DMALD 0x04
202#define CMD_DMALDP 0x25
203#define CMD_DMALP 0x20
204#define CMD_DMALPEND 0x28
205#define CMD_DMAKILL 0x01
206#define CMD_DMAMOV 0xbc
207#define CMD_DMANOP 0x18
208#define CMD_DMARMB 0x12
209#define CMD_DMASEV 0x34
210#define CMD_DMAST 0x08
211#define CMD_DMASTP 0x29
212#define CMD_DMASTZ 0x0c
213#define CMD_DMAWFE 0x36
214#define CMD_DMAWFP 0x30
215#define CMD_DMAWMB 0x13
216
217#define SZ_DMAADDH 3
218#define SZ_DMAEND 1
219#define SZ_DMAFLUSHP 2
220#define SZ_DMALD 1
221#define SZ_DMALDP 2
222#define SZ_DMALP 2
223#define SZ_DMALPEND 2
224#define SZ_DMAKILL 1
225#define SZ_DMAMOV 6
226#define SZ_DMANOP 1
227#define SZ_DMARMB 1
228#define SZ_DMASEV 2
229#define SZ_DMAST 1
230#define SZ_DMASTP 2
231#define SZ_DMASTZ 1
232#define SZ_DMAWFE 2
233#define SZ_DMAWFP 2
234#define SZ_DMAWMB 1
235#define SZ_DMAGO 6
236
237#define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
238#define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
239
240#define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
241#define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
242
243/*
244 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
245 * at 1byte/burst for P<->M and M<->M respectively.
246 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
247 * should be enough for P<->M and M<->M respectively.
248 */
249#define MCODE_BUFF_PER_REQ 256
250
251/* Use this _only_ to wait on transient states */
252#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
253
254#ifdef PL330_DEBUG_MCGEN
255static unsigned cmd_line;
256#define PL330_DBGCMD_DUMP(off, x...) do { \
257 printk("%x:", cmd_line); \
258 printk(x); \
259 cmd_line += off; \
260 } while (0)
261#define PL330_DBGMC_START(addr) (cmd_line = addr)
262#else
263#define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
264#define PL330_DBGMC_START(addr) do {} while (0)
265#endif
266
267/* The number of default descriptors */
268
269#define NR_DEFAULT_DESC 16
270
271/* Delay for runtime PM autosuspend, ms */
272#define PL330_AUTOSUSPEND_DELAY 20
273
274/* Populated by the PL330 core driver for DMA API driver's info */
275struct pl330_config {
276 u32 periph_id;
277#define DMAC_MODE_NS (1 << 0)
278 unsigned int mode;
279 unsigned int data_bus_width:10; /* In number of bits */
280 unsigned int data_buf_dep:11;
281 unsigned int num_chan:4;
282 unsigned int num_peri:6;
283 u32 peri_ns;
284 unsigned int num_events:6;
285 u32 irq_ns;
286};
287
288/**
289 * Request Configuration.
290 * The PL330 core does not modify this and uses the last
291 * working configuration if the request doesn't provide any.
292 *
293 * The Client may want to provide this info only for the
294 * first request and a request with new settings.
295 */
296struct pl330_reqcfg {
297 /* Address Incrementing */
298 unsigned dst_inc:1;
299 unsigned src_inc:1;
300
301 /*
302 * For now, the SRC & DST protection levels
303 * and burst size/length are assumed same.
304 */
305 bool nonsecure;
306 bool privileged;
307 bool insnaccess;
308 unsigned brst_len:5;
309 unsigned brst_size:3; /* in power of 2 */
310
311 enum pl330_cachectrl dcctl;
312 enum pl330_cachectrl scctl;
313 enum pl330_byteswap swap;
314 struct pl330_config *pcfg;
315};
316
317/*
318 * One cycle of DMAC operation.
319 * There may be more than one xfer in a request.
320 */
321struct pl330_xfer {
322 u32 src_addr;
323 u32 dst_addr;
324 /* Size to xfer */
325 u32 bytes;
326};
327
328/* The xfer callbacks are made with one of these arguments. */
329enum pl330_op_err {
330 /* The all xfers in the request were success. */
331 PL330_ERR_NONE,
332 /* If req aborted due to global error. */
333 PL330_ERR_ABORT,
334 /* If req failed due to problem with Channel. */
335 PL330_ERR_FAIL,
336};
337
338enum dmamov_dst {
339 SAR = 0,
340 CCR,
341 DAR,
342};
343
344enum pl330_dst {
345 SRC = 0,
346 DST,
347};
348
349enum pl330_cond {
350 SINGLE,
351 BURST,
352 ALWAYS,
353};
354
355struct dma_pl330_desc;
356
357struct _pl330_req {
358 u32 mc_bus;
359 void *mc_cpu;
360 struct dma_pl330_desc *desc;
361};
362
363/* ToBeDone for tasklet */
364struct _pl330_tbd {
365 bool reset_dmac;
366 bool reset_mngr;
367 u8 reset_chan;
368};
369
370/* A DMAC Thread */
371struct pl330_thread {
372 u8 id;
373 int ev;
374 /* If the channel is not yet acquired by any client */
375 bool free;
376 /* Parent DMAC */
377 struct pl330_dmac *dmac;
378 /* Only two at a time */
379 struct _pl330_req req[2];
380 /* Index of the last enqueued request */
381 unsigned lstenq;
382 /* Index of the last submitted request or -1 if the DMA is stopped */
383 int req_running;
384};
385
386enum pl330_dmac_state {
387 UNINIT,
388 INIT,
389 DYING,
390};
391
392enum desc_status {
393 /* In the DMAC pool */
394 FREE,
395 /*
396 * Allocated to some channel during prep_xxx
397 * Also may be sitting on the work_list.
398 */
399 PREP,
400 /*
401 * Sitting on the work_list and already submitted
402 * to the PL330 core. Not more than two descriptors
403 * of a channel can be BUSY at any time.
404 */
405 BUSY,
406 /*
407 * Sitting on the channel work_list but xfer done
408 * by PL330 core
409 */
410 DONE,
411};
412
413struct dma_pl330_chan {
414 /* Schedule desc completion */
415 struct tasklet_struct task;
416
417 /* DMA-Engine Channel */
418 struct dma_chan chan;
419
420 /* List of submitted descriptors */
421 struct list_head submitted_list;
422 /* List of issued descriptors */
423 struct list_head work_list;
424 /* List of completed descriptors */
425 struct list_head completed_list;
426
427 /* Pointer to the DMAC that manages this channel,
428 * NULL if the channel is available to be acquired.
429 * As the parent, this DMAC also provides descriptors
430 * to the channel.
431 */
432 struct pl330_dmac *dmac;
433
434 /* To protect channel manipulation */
435 spinlock_t lock;
436
437 /*
438 * Hardware channel thread of PL330 DMAC. NULL if the channel is
439 * available.
440 */
441 struct pl330_thread *thread;
442
443 /* For D-to-M and M-to-D channels */
444 int burst_sz; /* the peripheral fifo width */
445 int burst_len; /* the number of burst */
446 phys_addr_t fifo_addr;
447 /* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
448 dma_addr_t fifo_dma;
449 enum dma_data_direction dir;
450
451 /* for cyclic capability */
452 bool cyclic;
453
454 /* for runtime pm tracking */
455 bool active;
456};
457
458struct pl330_dmac {
459 /* DMA-Engine Device */
460 struct dma_device ddma;
461
462 /* Holds info about sg limitations */
463 struct device_dma_parameters dma_parms;
464
465 /* Pool of descriptors available for the DMAC's channels */
466 struct list_head desc_pool;
467 /* To protect desc_pool manipulation */
468 spinlock_t pool_lock;
469
470 /* Size of MicroCode buffers for each channel. */
471 unsigned mcbufsz;
472 /* ioremap'ed address of PL330 registers. */
473 void __iomem *base;
474 /* Populated by the PL330 core driver during pl330_add */
475 struct pl330_config pcfg;
476
477 spinlock_t lock;
478 /* Maximum possible events/irqs */
479 int events[32];
480 /* BUS address of MicroCode buffer */
481 dma_addr_t mcode_bus;
482 /* CPU address of MicroCode buffer */
483 void *mcode_cpu;
484 /* List of all Channel threads */
485 struct pl330_thread *channels;
486 /* Pointer to the MANAGER thread */
487 struct pl330_thread *manager;
488 /* To handle bad news in interrupt */
489 struct tasklet_struct tasks;
490 struct _pl330_tbd dmac_tbd;
491 /* State of DMAC operation */
492 enum pl330_dmac_state state;
493 /* Holds list of reqs with due callbacks */
494 struct list_head req_done;
495
496 /* Peripheral channels connected to this DMAC */
497 unsigned int num_peripherals;
498 struct dma_pl330_chan *peripherals; /* keep at end */
499 int quirks;
500};
501
502static struct pl330_of_quirks {
503 char *quirk;
504 int id;
505} of_quirks[] = {
506 {
507 .quirk = "arm,pl330-broken-no-flushp",
508 .id = PL330_QUIRK_BROKEN_NO_FLUSHP,
509 }
510};
511
512struct dma_pl330_desc {
513 /* To attach to a queue as child */
514 struct list_head node;
515
516 /* Descriptor for the DMA Engine API */
517 struct dma_async_tx_descriptor txd;
518
519 /* Xfer for PL330 core */
520 struct pl330_xfer px;
521
522 struct pl330_reqcfg rqcfg;
523
524 enum desc_status status;
525
526 int bytes_requested;
527 bool last;
528
529 /* The channel which currently holds this desc */
530 struct dma_pl330_chan *pchan;
531
532 enum dma_transfer_direction rqtype;
533 /* Index of peripheral for the xfer. */
534 unsigned peri:5;
535 /* Hook to attach to DMAC's list of reqs with due callback */
536 struct list_head rqd;
537};
538
539struct _xfer_spec {
540 u32 ccr;
541 struct dma_pl330_desc *desc;
542};
543
544static inline bool _queue_full(struct pl330_thread *thrd)
545{
546 return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
547}
548
549static inline bool is_manager(struct pl330_thread *thrd)
550{
551 return thrd->dmac->manager == thrd;
552}
553
554/* If manager of the thread is in Non-Secure mode */
555static inline bool _manager_ns(struct pl330_thread *thrd)
556{
557 return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
558}
559
560static inline u32 get_revision(u32 periph_id)
561{
562 return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
563}
564
565static inline u32 _emit_END(unsigned dry_run, u8 buf[])
566{
567 if (dry_run)
568 return SZ_DMAEND;
569
570 buf[0] = CMD_DMAEND;
571
572 PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
573
574 return SZ_DMAEND;
575}
576
577static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
578{
579 if (dry_run)
580 return SZ_DMAFLUSHP;
581
582 buf[0] = CMD_DMAFLUSHP;
583
584 peri &= 0x1f;
585 peri <<= 3;
586 buf[1] = peri;
587
588 PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
589
590 return SZ_DMAFLUSHP;
591}
592
593static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
594{
595 if (dry_run)
596 return SZ_DMALD;
597
598 buf[0] = CMD_DMALD;
599
600 if (cond == SINGLE)
601 buf[0] |= (0 << 1) | (1 << 0);
602 else if (cond == BURST)
603 buf[0] |= (1 << 1) | (1 << 0);
604
605 PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
606 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
607
608 return SZ_DMALD;
609}
610
611static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
612 enum pl330_cond cond, u8 peri)
613{
614 if (dry_run)
615 return SZ_DMALDP;
616
617 buf[0] = CMD_DMALDP;
618
619 if (cond == BURST)
620 buf[0] |= (1 << 1);
621
622 peri &= 0x1f;
623 peri <<= 3;
624 buf[1] = peri;
625
626 PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
627 cond == SINGLE ? 'S' : 'B', peri >> 3);
628
629 return SZ_DMALDP;
630}
631
632static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
633 unsigned loop, u8 cnt)
634{
635 if (dry_run)
636 return SZ_DMALP;
637
638 buf[0] = CMD_DMALP;
639
640 if (loop)
641 buf[0] |= (1 << 1);
642
643 cnt--; /* DMAC increments by 1 internally */
644 buf[1] = cnt;
645
646 PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
647
648 return SZ_DMALP;
649}
650
651struct _arg_LPEND {
652 enum pl330_cond cond;
653 bool forever;
654 unsigned loop;
655 u8 bjump;
656};
657
658static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
659 const struct _arg_LPEND *arg)
660{
661 enum pl330_cond cond = arg->cond;
662 bool forever = arg->forever;
663 unsigned loop = arg->loop;
664 u8 bjump = arg->bjump;
665
666 if (dry_run)
667 return SZ_DMALPEND;
668
669 buf[0] = CMD_DMALPEND;
670
671 if (loop)
672 buf[0] |= (1 << 2);
673
674 if (!forever)
675 buf[0] |= (1 << 4);
676
677 if (cond == SINGLE)
678 buf[0] |= (0 << 1) | (1 << 0);
679 else if (cond == BURST)
680 buf[0] |= (1 << 1) | (1 << 0);
681
682 buf[1] = bjump;
683
684 PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
685 forever ? "FE" : "END",
686 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
687 loop ? '1' : '0',
688 bjump);
689
690 return SZ_DMALPEND;
691}
692
693static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
694{
695 if (dry_run)
696 return SZ_DMAKILL;
697
698 buf[0] = CMD_DMAKILL;
699
700 return SZ_DMAKILL;
701}
702
703static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
704 enum dmamov_dst dst, u32 val)
705{
706 if (dry_run)
707 return SZ_DMAMOV;
708
709 buf[0] = CMD_DMAMOV;
710 buf[1] = dst;
711 buf[2] = val;
712 buf[3] = val >> 8;
713 buf[4] = val >> 16;
714 buf[5] = val >> 24;
715
716 PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
717 dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
718
719 return SZ_DMAMOV;
720}
721
722static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
723{
724 if (dry_run)
725 return SZ_DMARMB;
726
727 buf[0] = CMD_DMARMB;
728
729 PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
730
731 return SZ_DMARMB;
732}
733
734static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
735{
736 if (dry_run)
737 return SZ_DMASEV;
738
739 buf[0] = CMD_DMASEV;
740
741 ev &= 0x1f;
742 ev <<= 3;
743 buf[1] = ev;
744
745 PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
746
747 return SZ_DMASEV;
748}
749
750static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
751{
752 if (dry_run)
753 return SZ_DMAST;
754
755 buf[0] = CMD_DMAST;
756
757 if (cond == SINGLE)
758 buf[0] |= (0 << 1) | (1 << 0);
759 else if (cond == BURST)
760 buf[0] |= (1 << 1) | (1 << 0);
761
762 PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
763 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
764
765 return SZ_DMAST;
766}
767
768static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
769 enum pl330_cond cond, u8 peri)
770{
771 if (dry_run)
772 return SZ_DMASTP;
773
774 buf[0] = CMD_DMASTP;
775
776 if (cond == BURST)
777 buf[0] |= (1 << 1);
778
779 peri &= 0x1f;
780 peri <<= 3;
781 buf[1] = peri;
782
783 PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
784 cond == SINGLE ? 'S' : 'B', peri >> 3);
785
786 return SZ_DMASTP;
787}
788
789static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
790 enum pl330_cond cond, u8 peri)
791{
792 if (dry_run)
793 return SZ_DMAWFP;
794
795 buf[0] = CMD_DMAWFP;
796
797 if (cond == SINGLE)
798 buf[0] |= (0 << 1) | (0 << 0);
799 else if (cond == BURST)
800 buf[0] |= (1 << 1) | (0 << 0);
801 else
802 buf[0] |= (0 << 1) | (1 << 0);
803
804 peri &= 0x1f;
805 peri <<= 3;
806 buf[1] = peri;
807
808 PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
809 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
810
811 return SZ_DMAWFP;
812}
813
814static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
815{
816 if (dry_run)
817 return SZ_DMAWMB;
818
819 buf[0] = CMD_DMAWMB;
820
821 PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
822
823 return SZ_DMAWMB;
824}
825
826struct _arg_GO {
827 u8 chan;
828 u32 addr;
829 unsigned ns;
830};
831
832static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
833 const struct _arg_GO *arg)
834{
835 u8 chan = arg->chan;
836 u32 addr = arg->addr;
837 unsigned ns = arg->ns;
838
839 if (dry_run)
840 return SZ_DMAGO;
841
842 buf[0] = CMD_DMAGO;
843 buf[0] |= (ns << 1);
844 buf[1] = chan & 0x7;
845 buf[2] = addr;
846 buf[3] = addr >> 8;
847 buf[4] = addr >> 16;
848 buf[5] = addr >> 24;
849
850 return SZ_DMAGO;
851}
852
853#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
854
855/* Returns Time-Out */
856static bool _until_dmac_idle(struct pl330_thread *thrd)
857{
858 void __iomem *regs = thrd->dmac->base;
859 unsigned long loops = msecs_to_loops(5);
860
861 do {
862 /* Until Manager is Idle */
863 if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
864 break;
865
866 cpu_relax();
867 } while (--loops);
868
869 if (!loops)
870 return true;
871
872 return false;
873}
874
875static inline void _execute_DBGINSN(struct pl330_thread *thrd,
876 u8 insn[], bool as_manager)
877{
878 void __iomem *regs = thrd->dmac->base;
879 u32 val;
880
881 val = (insn[0] << 16) | (insn[1] << 24);
882 if (!as_manager) {
883 val |= (1 << 0);
884 val |= (thrd->id << 8); /* Channel Number */
885 }
886 writel(val, regs + DBGINST0);
887
888 val = le32_to_cpu(*((__le32 *)&insn[2]));
889 writel(val, regs + DBGINST1);
890
891 /* If timed out due to halted state-machine */
892 if (_until_dmac_idle(thrd)) {
893 dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
894 return;
895 }
896
897 /* Get going */
898 writel(0, regs + DBGCMD);
899}
900
901static inline u32 _state(struct pl330_thread *thrd)
902{
903 void __iomem *regs = thrd->dmac->base;
904 u32 val;
905
906 if (is_manager(thrd))
907 val = readl(regs + DS) & 0xf;
908 else
909 val = readl(regs + CS(thrd->id)) & 0xf;
910
911 switch (val) {
912 case DS_ST_STOP:
913 return PL330_STATE_STOPPED;
914 case DS_ST_EXEC:
915 return PL330_STATE_EXECUTING;
916 case DS_ST_CMISS:
917 return PL330_STATE_CACHEMISS;
918 case DS_ST_UPDTPC:
919 return PL330_STATE_UPDTPC;
920 case DS_ST_WFE:
921 return PL330_STATE_WFE;
922 case DS_ST_FAULT:
923 return PL330_STATE_FAULTING;
924 case DS_ST_ATBRR:
925 if (is_manager(thrd))
926 return PL330_STATE_INVALID;
927 else
928 return PL330_STATE_ATBARRIER;
929 case DS_ST_QBUSY:
930 if (is_manager(thrd))
931 return PL330_STATE_INVALID;
932 else
933 return PL330_STATE_QUEUEBUSY;
934 case DS_ST_WFP:
935 if (is_manager(thrd))
936 return PL330_STATE_INVALID;
937 else
938 return PL330_STATE_WFP;
939 case DS_ST_KILL:
940 if (is_manager(thrd))
941 return PL330_STATE_INVALID;
942 else
943 return PL330_STATE_KILLING;
944 case DS_ST_CMPLT:
945 if (is_manager(thrd))
946 return PL330_STATE_INVALID;
947 else
948 return PL330_STATE_COMPLETING;
949 case DS_ST_FLTCMP:
950 if (is_manager(thrd))
951 return PL330_STATE_INVALID;
952 else
953 return PL330_STATE_FAULT_COMPLETING;
954 default:
955 return PL330_STATE_INVALID;
956 }
957}
958
959static void _stop(struct pl330_thread *thrd)
960{
961 void __iomem *regs = thrd->dmac->base;
962 u8 insn[6] = {0, 0, 0, 0, 0, 0};
963
964 if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
965 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
966
967 /* Return if nothing needs to be done */
968 if (_state(thrd) == PL330_STATE_COMPLETING
969 || _state(thrd) == PL330_STATE_KILLING
970 || _state(thrd) == PL330_STATE_STOPPED)
971 return;
972
973 _emit_KILL(0, insn);
974
975 /* Stop generating interrupts for SEV */
976 writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
977
978 _execute_DBGINSN(thrd, insn, is_manager(thrd));
979}
980
981/* Start doing req 'idx' of thread 'thrd' */
982static bool _trigger(struct pl330_thread *thrd)
983{
984 void __iomem *regs = thrd->dmac->base;
985 struct _pl330_req *req;
986 struct dma_pl330_desc *desc;
987 struct _arg_GO go;
988 unsigned ns;
989 u8 insn[6] = {0, 0, 0, 0, 0, 0};
990 int idx;
991
992 /* Return if already ACTIVE */
993 if (_state(thrd) != PL330_STATE_STOPPED)
994 return true;
995
996 idx = 1 - thrd->lstenq;
997 if (thrd->req[idx].desc != NULL) {
998 req = &thrd->req[idx];
999 } else {
1000 idx = thrd->lstenq;
1001 if (thrd->req[idx].desc != NULL)
1002 req = &thrd->req[idx];
1003 else
1004 req = NULL;
1005 }
1006
1007 /* Return if no request */
1008 if (!req)
1009 return true;
1010
1011 /* Return if req is running */
1012 if (idx == thrd->req_running)
1013 return true;
1014
1015 desc = req->desc;
1016
1017 ns = desc->rqcfg.nonsecure ? 1 : 0;
1018
1019 /* See 'Abort Sources' point-4 at Page 2-25 */
1020 if (_manager_ns(thrd) && !ns)
1021 dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
1022 __func__, __LINE__);
1023
1024 go.chan = thrd->id;
1025 go.addr = req->mc_bus;
1026 go.ns = ns;
1027 _emit_GO(0, insn, &go);
1028
1029 /* Set to generate interrupts for SEV */
1030 writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
1031
1032 /* Only manager can execute GO */
1033 _execute_DBGINSN(thrd, insn, true);
1034
1035 thrd->req_running = idx;
1036
1037 return true;
1038}
1039
1040static bool _start(struct pl330_thread *thrd)
1041{
1042 switch (_state(thrd)) {
1043 case PL330_STATE_FAULT_COMPLETING:
1044 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
1045
1046 if (_state(thrd) == PL330_STATE_KILLING)
1047 UNTIL(thrd, PL330_STATE_STOPPED)
1048
1049 case PL330_STATE_FAULTING:
1050 _stop(thrd);
1051
1052 case PL330_STATE_KILLING:
1053 case PL330_STATE_COMPLETING:
1054 UNTIL(thrd, PL330_STATE_STOPPED)
1055
1056 case PL330_STATE_STOPPED:
1057 return _trigger(thrd);
1058
1059 case PL330_STATE_WFP:
1060 case PL330_STATE_QUEUEBUSY:
1061 case PL330_STATE_ATBARRIER:
1062 case PL330_STATE_UPDTPC:
1063 case PL330_STATE_CACHEMISS:
1064 case PL330_STATE_EXECUTING:
1065 return true;
1066
1067 case PL330_STATE_WFE: /* For RESUME, nothing yet */
1068 default:
1069 return false;
1070 }
1071}
1072
1073static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
1074 const struct _xfer_spec *pxs, int cyc)
1075{
1076 int off = 0;
1077 struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;
1078
1079 /* check lock-up free version */
1080 if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
1081 while (cyc--) {
1082 off += _emit_LD(dry_run, &buf[off], ALWAYS);
1083 off += _emit_ST(dry_run, &buf[off], ALWAYS);
1084 }
1085 } else {
1086 while (cyc--) {
1087 off += _emit_LD(dry_run, &buf[off], ALWAYS);
1088 off += _emit_RMB(dry_run, &buf[off]);
1089 off += _emit_ST(dry_run, &buf[off], ALWAYS);
1090 off += _emit_WMB(dry_run, &buf[off]);
1091 }
1092 }
1093
1094 return off;
1095}
1096
1097static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
1098 u8 buf[], const struct _xfer_spec *pxs,
1099 int cyc)
1100{
1101 int off = 0;
1102 enum pl330_cond cond;
1103
1104 if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
1105 cond = BURST;
1106 else
1107 cond = SINGLE;
1108
1109 while (cyc--) {
1110 off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
1111 off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
1112 off += _emit_ST(dry_run, &buf[off], ALWAYS);
1113
1114 if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
1115 off += _emit_FLUSHP(dry_run, &buf[off],
1116 pxs->desc->peri);
1117 }
1118
1119 return off;
1120}
1121
1122static inline int _ldst_memtodev(struct pl330_dmac *pl330,
1123 unsigned dry_run, u8 buf[],
1124 const struct _xfer_spec *pxs, int cyc)
1125{
1126 int off = 0;
1127 enum pl330_cond cond;
1128
1129 if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
1130 cond = BURST;
1131 else
1132 cond = SINGLE;
1133
1134 while (cyc--) {
1135 off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
1136 off += _emit_LD(dry_run, &buf[off], ALWAYS);
1137 off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);
1138
1139 if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
1140 off += _emit_FLUSHP(dry_run, &buf[off],
1141 pxs->desc->peri);
1142 }
1143
1144 return off;
1145}
1146
1147static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
1148 const struct _xfer_spec *pxs, int cyc)
1149{
1150 int off = 0;
1151
1152 switch (pxs->desc->rqtype) {
1153 case DMA_MEM_TO_DEV:
1154 off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
1155 break;
1156 case DMA_DEV_TO_MEM:
1157 off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
1158 break;
1159 case DMA_MEM_TO_MEM:
1160 off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
1161 break;
1162 default:
1163 off += 0x40000000; /* Scare off the Client */
1164 break;
1165 }
1166
1167 return off;
1168}
1169
1170/* Returns bytes consumed and updates bursts */
1171static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
1172 unsigned long *bursts, const struct _xfer_spec *pxs)
1173{
1174 int cyc, cycmax, szlp, szlpend, szbrst, off;
1175 unsigned lcnt0, lcnt1, ljmp0, ljmp1;
1176 struct _arg_LPEND lpend;
1177
1178 if (*bursts == 1)
1179 return _bursts(pl330, dry_run, buf, pxs, 1);
1180
1181 /* Max iterations possible in DMALP is 256 */
1182 if (*bursts >= 256*256) {
1183 lcnt1 = 256;
1184 lcnt0 = 256;
1185 cyc = *bursts / lcnt1 / lcnt0;
1186 } else if (*bursts > 256) {
1187 lcnt1 = 256;
1188 lcnt0 = *bursts / lcnt1;
1189 cyc = 1;
1190 } else {
1191 lcnt1 = *bursts;
1192 lcnt0 = 0;
1193 cyc = 1;
1194 }
1195
1196 szlp = _emit_LP(1, buf, 0, 0);
1197 szbrst = _bursts(pl330, 1, buf, pxs, 1);
1198
1199 lpend.cond = ALWAYS;
1200 lpend.forever = false;
1201 lpend.loop = 0;
1202 lpend.bjump = 0;
1203 szlpend = _emit_LPEND(1, buf, &lpend);
1204
1205 if (lcnt0) {
1206 szlp *= 2;
1207 szlpend *= 2;
1208 }
1209
1210	/*
1211	 * Max bursts that we can unroll due to the limit on the
1212	 * size of the backward jump that can be encoded in DMALPEND,
1213	 * which is 8 bits and hence at most 255.
1214	 */
1215 cycmax = (255 - (szlp + szlpend)) / szbrst;
1216
1217 cyc = (cycmax < cyc) ? cycmax : cyc;
1218
1219 off = 0;
1220
1221 if (lcnt0) {
1222 off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
1223 ljmp0 = off;
1224 }
1225
1226 off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
1227 ljmp1 = off;
1228
1229 off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);
1230
1231 lpend.cond = ALWAYS;
1232 lpend.forever = false;
1233 lpend.loop = 1;
1234 lpend.bjump = off - ljmp1;
1235 off += _emit_LPEND(dry_run, &buf[off], &lpend);
1236
1237 if (lcnt0) {
1238 lpend.cond = ALWAYS;
1239 lpend.forever = false;
1240 lpend.loop = 0;
1241 lpend.bjump = off - ljmp0;
1242 off += _emit_LPEND(dry_run, &buf[off], &lpend);
1243 }
1244
1245 *bursts = lcnt1 * cyc;
1246 if (lcnt0)
1247 *bursts *= lcnt0;
1248
1249 return off;
1250}
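
/*
 * Worked example: with *bursts == 2080 the middle branch above picks
 * lcnt1 = 256, lcnt0 = 2080 / 256 = 8 and cyc = 1, so this call emits
 * a 256 x 8 nested loop, reports 2048 bursts via *bursts, and the
 * caller (_setup_loops) loops again for the remaining 32. cyc may be
 * clipped further by cycmax when the unrolled body would overflow
 * DMALPEND's 8-bit backward jump.
 */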
1251
1252static inline int _setup_loops(struct pl330_dmac *pl330,
1253 unsigned dry_run, u8 buf[],
1254 const struct _xfer_spec *pxs)
1255{
1256 struct pl330_xfer *x = &pxs->desc->px;
1257 u32 ccr = pxs->ccr;
1258 unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
1259 int off = 0;
1260
1261 while (bursts) {
1262 c = bursts;
1263 off += _loop(pl330, dry_run, &buf[off], &c, pxs);
1264 bursts -= c;
1265 }
1266
1267 return off;
1268}
1269
1270static inline int _setup_xfer(struct pl330_dmac *pl330,
1271 unsigned dry_run, u8 buf[],
1272 const struct _xfer_spec *pxs)
1273{
1274 struct pl330_xfer *x = &pxs->desc->px;
1275 int off = 0;
1276
1277 /* DMAMOV SAR, x->src_addr */
1278 off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
1279 /* DMAMOV DAR, x->dst_addr */
1280 off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
1281
1282 /* Setup Loop(s) */
1283 off += _setup_loops(pl330, dry_run, &buf[off], pxs);
1284
1285 return off;
1286}
1287
1288/*
1289 * A req is a sequence of one or more xfer units.
1290 * Returns the number of bytes taken to set up the MC for the req.
1291 */
1292static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
1293 struct pl330_thread *thrd, unsigned index,
1294 struct _xfer_spec *pxs)
1295{
1296 struct _pl330_req *req = &thrd->req[index];
1297 struct pl330_xfer *x;
1298 u8 *buf = req->mc_cpu;
1299 int off = 0;
1300
1301 PL330_DBGMC_START(req->mc_bus);
1302
1303 /* DMAMOV CCR, ccr */
1304 off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
1305
1306 x = &pxs->desc->px;
1307 /* Error if xfer length is not aligned at burst size */
1308 if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
1309 return -EINVAL;
1310
1311 off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
1312
1313 /* DMASEV peripheral/event */
1314 off += _emit_SEV(dry_run, &buf[off], thrd->ev);
1315 /* DMAEND */
1316 off += _emit_END(dry_run, &buf[off]);
1317
1318 return off;
1319}
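
/*
 * Shape of the program generated for one req (illustrative):
 *
 *	DMAMOV CCR, <ccr>
 *	DMAMOV SAR, <src_addr>
 *	DMAMOV DAR, <dst_addr>
 *	DMALP ... { transfer cycles } ... DMALPEND	; from _setup_loops()
 *	DMASEV <thrd->ev>
 *	DMAEND
 */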
1320
1321static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
1322{
1323 u32 ccr = 0;
1324
1325 if (rqc->src_inc)
1326 ccr |= CC_SRCINC;
1327
1328 if (rqc->dst_inc)
1329 ccr |= CC_DSTINC;
1330
1331	/* We set the same protection levels for Src and Dst for now */
1332 if (rqc->privileged)
1333 ccr |= CC_SRCPRI | CC_DSTPRI;
1334 if (rqc->nonsecure)
1335 ccr |= CC_SRCNS | CC_DSTNS;
1336 if (rqc->insnaccess)
1337 ccr |= CC_SRCIA | CC_DSTIA;
1338
1339 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
1340 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
1341
1342 ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
1343 ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
1344
1345 ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
1346 ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
1347
1348 ccr |= (rqc->swap << CC_SWAP_SHFT);
1349
1350 return ccr;
1351}
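
/*
 * Example encoding: src_inc = dst_inc = 1, brst_size = 2 (4-byte
 * beats), brst_len = 8 and everything else zero yields
 *
 *	CCR = CC_SRCINC | CC_DSTINC
 *	    | (7 << CC_SRCBRSTLEN_SHFT) | (7 << CC_DSTBRSTLEN_SHFT)
 *	    | (2 << CC_SRCBRSTSIZE_SHFT) | (2 << CC_DSTBRSTSIZE_SHFT)
 *	    = 0x001d4075
 */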
1352
1353/*
1354 * Submit a list of xfers after which the client wants notification.
1355 * Client is not notified after each xfer unit, just once after all
1356 * xfer units are done or some error occurs.
1357 */
1358static int pl330_submit_req(struct pl330_thread *thrd,
1359 struct dma_pl330_desc *desc)
1360{
1361 struct pl330_dmac *pl330 = thrd->dmac;
1362 struct _xfer_spec xs;
1363 unsigned long flags;
1364 unsigned idx;
1365 u32 ccr;
1366 int ret = 0;
1367
1368 if (pl330->state == DYING
1369 || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
1370 dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
1371 __func__, __LINE__);
1372 return -EAGAIN;
1373 }
1374
1375	/* Reject a request for a non-existent peripheral */
1376 if (desc->rqtype != DMA_MEM_TO_MEM &&
1377 desc->peri >= pl330->pcfg.num_peri) {
1378 dev_info(thrd->dmac->ddma.dev,
1379 "%s:%d Invalid peripheral(%u)!\n",
1380 __func__, __LINE__, desc->peri);
1381 return -EINVAL;
1382 }
1383
1384 spin_lock_irqsave(&pl330->lock, flags);
1385
1386 if (_queue_full(thrd)) {
1387 ret = -EAGAIN;
1388 goto xfer_exit;
1389 }
1390
1391 /* Prefer Secure Channel */
1392 if (!_manager_ns(thrd))
1393 desc->rqcfg.nonsecure = 0;
1394 else
1395 desc->rqcfg.nonsecure = 1;
1396
1397 ccr = _prepare_ccr(&desc->rqcfg);
1398
1399 idx = thrd->req[0].desc == NULL ? 0 : 1;
1400
1401 xs.ccr = ccr;
1402 xs.desc = desc;
1403
1404 /* First dry run to check if req is acceptable */
1405 ret = _setup_req(pl330, 1, thrd, idx, &xs);
1406 if (ret < 0)
1407 goto xfer_exit;
1408
1409 if (ret > pl330->mcbufsz / 2) {
1410 dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
1411 __func__, __LINE__, ret, pl330->mcbufsz / 2);
1412 ret = -ENOMEM;
1413 goto xfer_exit;
1414 }
1415
1416 /* Hook the request */
1417 thrd->lstenq = idx;
1418 thrd->req[idx].desc = desc;
1419 _setup_req(pl330, 0, thrd, idx, &xs);
1420
1421 ret = 0;
1422
1423xfer_exit:
1424 spin_unlock_irqrestore(&pl330->lock, flags);
1425
1426 return ret;
1427}
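
/*
 * Note the two-deep request queue: pl330_submit_req() lands the new
 * descriptor in req[0] if that slot is free, else in req[1]. With one
 * request running and a second one hooked, the completion interrupt
 * can _start() the queued request immediately, without first waiting
 * for the CPU to program new microcode.
 */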
1428
1429static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
1430{
1431 struct dma_pl330_chan *pch;
1432 unsigned long flags;
1433
1434 if (!desc)
1435 return;
1436
1437 pch = desc->pchan;
1438
1439 /* If desc aborted */
1440 if (!pch)
1441 return;
1442
1443 spin_lock_irqsave(&pch->lock, flags);
1444
1445 desc->status = DONE;
1446
1447 spin_unlock_irqrestore(&pch->lock, flags);
1448
1449 tasklet_schedule(&pch->task);
1450}
1451
1452static void pl330_dotask(unsigned long data)
1453{
1454 struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
1455 unsigned long flags;
1456 int i;
1457
1458 spin_lock_irqsave(&pl330->lock, flags);
1459
1460	/* The DMAC itself has gone nuts */
1461 if (pl330->dmac_tbd.reset_dmac) {
1462 pl330->state = DYING;
1463 /* Reset the manager too */
1464 pl330->dmac_tbd.reset_mngr = true;
1465 /* Clear the reset flag */
1466 pl330->dmac_tbd.reset_dmac = false;
1467 }
1468
1469 if (pl330->dmac_tbd.reset_mngr) {
1470 _stop(pl330->manager);
1471 /* Reset all channels */
1472 pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
1473 /* Clear the reset flag */
1474 pl330->dmac_tbd.reset_mngr = false;
1475 }
1476
1477 for (i = 0; i < pl330->pcfg.num_chan; i++) {
1478
1479 if (pl330->dmac_tbd.reset_chan & (1 << i)) {
1480 struct pl330_thread *thrd = &pl330->channels[i];
1481 void __iomem *regs = pl330->base;
1482 enum pl330_op_err err;
1483
1484 _stop(thrd);
1485
1486 if (readl(regs + FSC) & (1 << thrd->id))
1487 err = PL330_ERR_FAIL;
1488 else
1489 err = PL330_ERR_ABORT;
1490
1491 spin_unlock_irqrestore(&pl330->lock, flags);
1492 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
1493 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
1494 spin_lock_irqsave(&pl330->lock, flags);
1495
1496 thrd->req[0].desc = NULL;
1497 thrd->req[1].desc = NULL;
1498 thrd->req_running = -1;
1499
1500 /* Clear the reset flag */
1501 pl330->dmac_tbd.reset_chan &= ~(1 << i);
1502 }
1503 }
1504
1505 spin_unlock_irqrestore(&pl330->lock, flags);
1506
1507 return;
1508}
1509
1510/* Returns 1 if state was updated, 0 otherwise */
1511static int pl330_update(struct pl330_dmac *pl330)
1512{
1513 struct dma_pl330_desc *descdone;
1514 unsigned long flags;
1515 void __iomem *regs;
1516 u32 val;
1517 int id, ev, ret = 0;
1518
1519 regs = pl330->base;
1520
1521 spin_lock_irqsave(&pl330->lock, flags);
1522
1523 val = readl(regs + FSM) & 0x1;
1524 if (val)
1525 pl330->dmac_tbd.reset_mngr = true;
1526 else
1527 pl330->dmac_tbd.reset_mngr = false;
1528
1529 val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
1530 pl330->dmac_tbd.reset_chan |= val;
1531 if (val) {
1532 int i = 0;
1533 while (i < pl330->pcfg.num_chan) {
1534 if (val & (1 << i)) {
1535 dev_info(pl330->ddma.dev,
1536 "Reset Channel-%d\t CS-%x FTC-%x\n",
1537 i, readl(regs + CS(i)),
1538 readl(regs + FTC(i)));
1539 _stop(&pl330->channels[i]);
1540 }
1541 i++;
1542 }
1543 }
1544
1545	/* Check which event happened, i.e. which thread notified */
1546 val = readl(regs + ES);
1547 if (pl330->pcfg.num_events < 32
1548 && val & ~((1 << pl330->pcfg.num_events) - 1)) {
1549 pl330->dmac_tbd.reset_dmac = true;
1550 dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
1551 __LINE__);
1552 ret = 1;
1553 goto updt_exit;
1554 }
1555
1556 for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
1557 if (val & (1 << ev)) { /* Event occurred */
1558 struct pl330_thread *thrd;
1559 u32 inten = readl(regs + INTEN);
1560 int active;
1561
1562 /* Clear the event */
1563 if (inten & (1 << ev))
1564 writel(1 << ev, regs + INTCLR);
1565
1566 ret = 1;
1567
1568 id = pl330->events[ev];
1569
1570 thrd = &pl330->channels[id];
1571
1572 active = thrd->req_running;
1573 if (active == -1) /* Aborted */
1574 continue;
1575
1576 /* Detach the req */
1577 descdone = thrd->req[active].desc;
1578 thrd->req[active].desc = NULL;
1579
1580 thrd->req_running = -1;
1581
1582 /* Get going again ASAP */
1583 _start(thrd);
1584
1585 /* For now, just make a list of callbacks to be done */
1586 list_add_tail(&descdone->rqd, &pl330->req_done);
1587 }
1588 }
1589
1590 /* Now that we are in no hurry, do the callbacks */
1591 while (!list_empty(&pl330->req_done)) {
1592 descdone = list_first_entry(&pl330->req_done,
1593 struct dma_pl330_desc, rqd);
1594 list_del(&descdone->rqd);
1595 spin_unlock_irqrestore(&pl330->lock, flags);
1596 dma_pl330_rqcb(descdone, PL330_ERR_NONE);
1597 spin_lock_irqsave(&pl330->lock, flags);
1598 }
1599
1600updt_exit:
1601 spin_unlock_irqrestore(&pl330->lock, flags);
1602
1603 if (pl330->dmac_tbd.reset_dmac
1604 || pl330->dmac_tbd.reset_mngr
1605 || pl330->dmac_tbd.reset_chan) {
1606 ret = 1;
1607 tasklet_schedule(&pl330->tasks);
1608 }
1609
1610 return ret;
1611}
1612
1613/* Reserve an event */
1614static inline int _alloc_event(struct pl330_thread *thrd)
1615{
1616 struct pl330_dmac *pl330 = thrd->dmac;
1617 int ev;
1618
1619 for (ev = 0; ev < pl330->pcfg.num_events; ev++)
1620 if (pl330->events[ev] == -1) {
1621 pl330->events[ev] = thrd->id;
1622 return ev;
1623 }
1624
1625 return -1;
1626}
1627
1628static bool _chan_ns(const struct pl330_dmac *pl330, int i)
1629{
1630 return pl330->pcfg.irq_ns & (1 << i);
1631}
1632
1633/* Upon success, returns a handle to the
1634 * allocated channel, NULL otherwise.
1635 */
1636static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1637{
1638 struct pl330_thread *thrd = NULL;
1639 int chans, i;
1640
1641 if (pl330->state == DYING)
1642 return NULL;
1643
1644 chans = pl330->pcfg.num_chan;
1645
1646 for (i = 0; i < chans; i++) {
1647 thrd = &pl330->channels[i];
1648 if ((thrd->free) && (!_manager_ns(thrd) ||
1649 _chan_ns(pl330, i))) {
1650 thrd->ev = _alloc_event(thrd);
1651 if (thrd->ev >= 0) {
1652 thrd->free = false;
1653 thrd->lstenq = 1;
1654 thrd->req[0].desc = NULL;
1655 thrd->req[1].desc = NULL;
1656 thrd->req_running = -1;
1657 break;
1658 }
1659 }
1660 thrd = NULL;
1661 }
1662
1663 return thrd;
1664}
1665
1666/* Release an event */
1667static inline void _free_event(struct pl330_thread *thrd, int ev)
1668{
1669 struct pl330_dmac *pl330 = thrd->dmac;
1670
1671 /* If the event is valid and was held by the thread */
1672 if (ev >= 0 && ev < pl330->pcfg.num_events
1673 && pl330->events[ev] == thrd->id)
1674 pl330->events[ev] = -1;
1675}
1676
1677static void pl330_release_channel(struct pl330_thread *thrd)
1678{
1679 struct pl330_dmac *pl330;
1680
1681 if (!thrd || thrd->free)
1682 return;
1683
1684 _stop(thrd);
1685
1686 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
1687 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);
1688
1689 pl330 = thrd->dmac;
1690
1691 _free_event(thrd, thrd->ev);
1692 thrd->free = true;
1693}
1694
1695/* Initialize the structure for PL330 configuration, which can be used
1696 * by the client driver to make best use of the DMAC.
1697 */
1698static void read_dmac_config(struct pl330_dmac *pl330)
1699{
1700 void __iomem *regs = pl330->base;
1701 u32 val;
1702
1703 val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
1704 val &= CRD_DATA_WIDTH_MASK;
1705 pl330->pcfg.data_bus_width = 8 * (1 << val);
1706
1707 val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
1708 val &= CRD_DATA_BUFF_MASK;
1709 pl330->pcfg.data_buf_dep = val + 1;
1710
1711 val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
1712 val &= CR0_NUM_CHANS_MASK;
1713 val += 1;
1714 pl330->pcfg.num_chan = val;
1715
1716 val = readl(regs + CR0);
1717 if (val & CR0_PERIPH_REQ_SET) {
1718 val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
1719 val += 1;
1720 pl330->pcfg.num_peri = val;
1721 pl330->pcfg.peri_ns = readl(regs + CR4);
1722 } else {
1723 pl330->pcfg.num_peri = 0;
1724 }
1725
1726 val = readl(regs + CR0);
1727 if (val & CR0_BOOT_MAN_NS)
1728 pl330->pcfg.mode |= DMAC_MODE_NS;
1729 else
1730 pl330->pcfg.mode &= ~DMAC_MODE_NS;
1731
1732 val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
1733 val &= CR0_NUM_EVENTS_MASK;
1734 val += 1;
1735 pl330->pcfg.num_events = val;
1736
1737 pl330->pcfg.irq_ns = readl(regs + CR3);
1738}
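
/*
 * Example decode: a CRD data-width field of 2 gives
 * data_bus_width = 8 * (1 << 2) = 32 bits, while the CR0 channel,
 * peripheral and event counts are stored minus one, so e.g. a
 * NUM_CHANS field of 7 means 8 channel threads.
 */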
1739
1740static inline void _reset_thread(struct pl330_thread *thrd)
1741{
1742 struct pl330_dmac *pl330 = thrd->dmac;
1743
1744 thrd->req[0].mc_cpu = pl330->mcode_cpu
1745 + (thrd->id * pl330->mcbufsz);
1746 thrd->req[0].mc_bus = pl330->mcode_bus
1747 + (thrd->id * pl330->mcbufsz);
1748 thrd->req[0].desc = NULL;
1749
1750 thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
1751 + pl330->mcbufsz / 2;
1752 thrd->req[1].mc_bus = thrd->req[0].mc_bus
1753 + pl330->mcbufsz / 2;
1754 thrd->req[1].desc = NULL;
1755
1756 thrd->req_running = -1;
1757}
1758
1759static int dmac_alloc_threads(struct pl330_dmac *pl330)
1760{
1761 int chans = pl330->pcfg.num_chan;
1762 struct pl330_thread *thrd;
1763 int i;
1764
1765 /* Allocate 1 Manager and 'chans' Channel threads */
1766	pl330->channels = kcalloc(1 + chans, sizeof(*thrd),
1767				  GFP_KERNEL);
1768 if (!pl330->channels)
1769 return -ENOMEM;
1770
1771 /* Init Channel threads */
1772 for (i = 0; i < chans; i++) {
1773 thrd = &pl330->channels[i];
1774 thrd->id = i;
1775 thrd->dmac = pl330;
1776 _reset_thread(thrd);
1777 thrd->free = true;
1778 }
1779
1780 /* MANAGER is indexed at the end */
1781 thrd = &pl330->channels[chans];
1782 thrd->id = chans;
1783 thrd->dmac = pl330;
1784 thrd->free = false;
1785 pl330->manager = thrd;
1786
1787 return 0;
1788}
1789
1790static int dmac_alloc_resources(struct pl330_dmac *pl330)
1791{
1792 int chans = pl330->pcfg.num_chan;
1793 int ret;
1794
1795 /*
1796 * Alloc MicroCode buffer for 'chans' Channel threads.
1797	 * A channel's buffer offset is (Channel_Id * pl330->mcbufsz)
1798 */
1799 pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
1800 chans * pl330->mcbufsz,
1801 &pl330->mcode_bus, GFP_KERNEL,
1802 DMA_ATTR_PRIVILEGED);
1803 if (!pl330->mcode_cpu) {
1804 dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
1805 __func__, __LINE__);
1806 return -ENOMEM;
1807 }
1808
1809 ret = dmac_alloc_threads(pl330);
1810 if (ret) {
1811		dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
1812 __func__, __LINE__);
1813 dma_free_coherent(pl330->ddma.dev,
1814 chans * pl330->mcbufsz,
1815 pl330->mcode_cpu, pl330->mcode_bus);
1816 return ret;
1817 }
1818
1819 return 0;
1820}
1821
1822static int pl330_add(struct pl330_dmac *pl330)
1823{
1824 int i, ret;
1825
1826 /* Check if we can handle this DMAC */
1827 if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
1828 dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
1829 pl330->pcfg.periph_id);
1830 return -EINVAL;
1831 }
1832
1833 /* Read the configuration of the DMAC */
1834 read_dmac_config(pl330);
1835
1836 if (pl330->pcfg.num_events == 0) {
1837 dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
1838 __func__, __LINE__);
1839 return -EINVAL;
1840 }
1841
1842 spin_lock_init(&pl330->lock);
1843
1844 INIT_LIST_HEAD(&pl330->req_done);
1845
1846 /* Use default MC buffer size if not provided */
1847 if (!pl330->mcbufsz)
1848 pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;
1849
1850 /* Mark all events as free */
1851 for (i = 0; i < pl330->pcfg.num_events; i++)
1852 pl330->events[i] = -1;
1853
1854 /* Allocate resources needed by the DMAC */
1855 ret = dmac_alloc_resources(pl330);
1856 if (ret) {
1857 dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
1858 return ret;
1859 }
1860
1861 tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
1862
1863 pl330->state = INIT;
1864
1865 return 0;
1866}
1867
1868static int dmac_free_threads(struct pl330_dmac *pl330)
1869{
1870 struct pl330_thread *thrd;
1871 int i;
1872
1873 /* Release Channel threads */
1874 for (i = 0; i < pl330->pcfg.num_chan; i++) {
1875 thrd = &pl330->channels[i];
1876 pl330_release_channel(thrd);
1877 }
1878
1879 /* Free memory */
1880 kfree(pl330->channels);
1881
1882 return 0;
1883}
1884
1885static void pl330_del(struct pl330_dmac *pl330)
1886{
1887 pl330->state = UNINIT;
1888
1889 tasklet_kill(&pl330->tasks);
1890
1891 /* Free DMAC resources */
1892 dmac_free_threads(pl330);
1893
1894 dma_free_coherent(pl330->ddma.dev,
1895 pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
1896 pl330->mcode_bus);
1897}
1898
1899/* forward declaration */
1900static struct amba_driver pl330_driver;
1901
1902static inline struct dma_pl330_chan *
1903to_pchan(struct dma_chan *ch)
1904{
1905 if (!ch)
1906 return NULL;
1907
1908 return container_of(ch, struct dma_pl330_chan, chan);
1909}
1910
1911static inline struct dma_pl330_desc *
1912to_desc(struct dma_async_tx_descriptor *tx)
1913{
1914 return container_of(tx, struct dma_pl330_desc, txd);
1915}
1916
1917static inline void fill_queue(struct dma_pl330_chan *pch)
1918{
1919 struct dma_pl330_desc *desc;
1920 int ret;
1921
1922 list_for_each_entry(desc, &pch->work_list, node) {
1923
1924 /* If already submitted */
1925 if (desc->status == BUSY)
1926 continue;
1927
1928 ret = pl330_submit_req(pch->thread, desc);
1929 if (!ret) {
1930 desc->status = BUSY;
1931 } else if (ret == -EAGAIN) {
1932 /* QFull or DMAC Dying */
1933 break;
1934 } else {
1935 /* Unacceptable request */
1936 desc->status = DONE;
1937 dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
1938 __func__, __LINE__, desc->txd.cookie);
1939 tasklet_schedule(&pch->task);
1940 }
1941 }
1942}
1943
1944static void pl330_tasklet(unsigned long data)
1945{
1946 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
1947 struct dma_pl330_desc *desc, *_dt;
1948 unsigned long flags;
1949 bool power_down = false;
1950
1951 spin_lock_irqsave(&pch->lock, flags);
1952
1953 /* Pick up ripe tomatoes */
1954 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
1955 if (desc->status == DONE) {
1956 if (!pch->cyclic)
1957 dma_cookie_complete(&desc->txd);
1958 list_move_tail(&desc->node, &pch->completed_list);
1959 }
1960
1961	/* Try to submit a req immediately after the last completed cookie */
1962 fill_queue(pch);
1963
1964 if (list_empty(&pch->work_list)) {
1965 spin_lock(&pch->thread->dmac->lock);
1966 _stop(pch->thread);
1967 spin_unlock(&pch->thread->dmac->lock);
1968 power_down = true;
1969 pch->active = false;
1970 } else {
1971 /* Make sure the PL330 Channel thread is active */
1972 spin_lock(&pch->thread->dmac->lock);
1973 _start(pch->thread);
1974 spin_unlock(&pch->thread->dmac->lock);
1975 }
1976
1977 while (!list_empty(&pch->completed_list)) {
1978 struct dmaengine_desc_callback cb;
1979
1980 desc = list_first_entry(&pch->completed_list,
1981 struct dma_pl330_desc, node);
1982
1983 dmaengine_desc_get_callback(&desc->txd, &cb);
1984
1985 if (pch->cyclic) {
1986 desc->status = PREP;
1987 list_move_tail(&desc->node, &pch->work_list);
1988 if (power_down) {
1989 pch->active = true;
1990 spin_lock(&pch->thread->dmac->lock);
1991 _start(pch->thread);
1992 spin_unlock(&pch->thread->dmac->lock);
1993 power_down = false;
1994 }
1995 } else {
1996 desc->status = FREE;
1997 list_move_tail(&desc->node, &pch->dmac->desc_pool);
1998 }
1999
2000 dma_descriptor_unmap(&desc->txd);
2001
2002 if (dmaengine_desc_callback_valid(&cb)) {
2003 spin_unlock_irqrestore(&pch->lock, flags);
2004 dmaengine_desc_callback_invoke(&cb, NULL);
2005 spin_lock_irqsave(&pch->lock, flags);
2006 }
2007 }
2008 spin_unlock_irqrestore(&pch->lock, flags);
2009
2010 /* If work list empty, power down */
2011 if (power_down) {
2012 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2013 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2014 }
2015}
2016
2017static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
2018 struct of_dma *ofdma)
2019{
2020 int count = dma_spec->args_count;
2021 struct pl330_dmac *pl330 = ofdma->of_dma_data;
2022 unsigned int chan_id;
2023
2024 if (!pl330)
2025 return NULL;
2026
2027 if (count != 1)
2028 return NULL;
2029
2030 chan_id = dma_spec->args[0];
2031 if (chan_id >= pl330->num_peripherals)
2032 return NULL;
2033
2034 return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
2035}
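
/*
 * Illustrative consumer node, assuming the usual #dma-cells = <1>
 * binding where the single cell selects the peripheral request line:
 *
 *	serial@12c00000 {
 *		...
 *		dmas = <&pdma0 2>, <&pdma0 3>;
 *		dma-names = "rx", "tx";
 *	};
 */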
2036
2037static int pl330_alloc_chan_resources(struct dma_chan *chan)
2038{
2039 struct dma_pl330_chan *pch = to_pchan(chan);
2040 struct pl330_dmac *pl330 = pch->dmac;
2041 unsigned long flags;
2042
2043 spin_lock_irqsave(&pl330->lock, flags);
2044
2045 dma_cookie_init(chan);
2046 pch->cyclic = false;
2047
2048 pch->thread = pl330_request_channel(pl330);
2049 if (!pch->thread) {
2050 spin_unlock_irqrestore(&pl330->lock, flags);
2051 return -ENOMEM;
2052 }
2053
2054 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
2055
2056 spin_unlock_irqrestore(&pl330->lock, flags);
2057
2058 return 1;
2059}
2060
2061/*
2062 * We need the data direction between the DMAC (the dma-mapping "device") and
2063 * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing!
2064 */
2065static enum dma_data_direction
2066pl330_dma_slave_map_dir(enum dma_transfer_direction dir)
2067{
2068 switch (dir) {
2069 case DMA_MEM_TO_DEV:
2070 return DMA_FROM_DEVICE;
2071 case DMA_DEV_TO_MEM:
2072 return DMA_TO_DEVICE;
2073 case DMA_DEV_TO_DEV:
2074 return DMA_BIDIRECTIONAL;
2075 default:
2076 return DMA_NONE;
2077 }
2078}
2079
2080static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch)
2081{
2082 if (pch->dir != DMA_NONE)
2083 dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma,
2084 1 << pch->burst_sz, pch->dir, 0);
2085 pch->dir = DMA_NONE;
2086}
2087
2088
2089static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
2090 enum dma_transfer_direction dir)
2091{
2092 struct device *dev = pch->chan.device->dev;
2093 enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir);
2094
2095 /* Already mapped for this config? */
2096 if (pch->dir == dma_dir)
2097 return true;
2098
2099 pl330_unprep_slave_fifo(pch);
2100 pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr,
2101 1 << pch->burst_sz, dma_dir, 0);
2102 if (dma_mapping_error(dev, pch->fifo_dma))
2103 return false;
2104
2105 pch->dir = dma_dir;
2106 return true;
2107}
2108
2109static int pl330_config(struct dma_chan *chan,
2110 struct dma_slave_config *slave_config)
2111{
2112 struct dma_pl330_chan *pch = to_pchan(chan);
2113
2114 pl330_unprep_slave_fifo(pch);
2115 if (slave_config->direction == DMA_MEM_TO_DEV) {
2116 if (slave_config->dst_addr)
2117 pch->fifo_addr = slave_config->dst_addr;
2118 if (slave_config->dst_addr_width)
2119 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2120 if (slave_config->dst_maxburst)
2121 pch->burst_len = slave_config->dst_maxburst;
2122 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
2123 if (slave_config->src_addr)
2124 pch->fifo_addr = slave_config->src_addr;
2125 if (slave_config->src_addr_width)
2126 pch->burst_sz = __ffs(slave_config->src_addr_width);
2127 if (slave_config->src_maxburst)
2128 pch->burst_len = slave_config->src_maxburst;
2129 }
2130
2131 return 0;
2132}
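
/*
 * Sketch of the client side that feeds this callback (names like
 * fifo_phys are illustrative, not part of this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */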
2133
2134static int pl330_terminate_all(struct dma_chan *chan)
2135{
2136 struct dma_pl330_chan *pch = to_pchan(chan);
2137 struct dma_pl330_desc *desc;
2138 unsigned long flags;
2139 struct pl330_dmac *pl330 = pch->dmac;
2140 LIST_HEAD(list);
2141 bool power_down = false;
2142
2143 pm_runtime_get_sync(pl330->ddma.dev);
2144 spin_lock_irqsave(&pch->lock, flags);
2145 spin_lock(&pl330->lock);
2146 _stop(pch->thread);
2147 spin_unlock(&pl330->lock);
2148
2149 pch->thread->req[0].desc = NULL;
2150 pch->thread->req[1].desc = NULL;
2151 pch->thread->req_running = -1;
2152 power_down = pch->active;
2153 pch->active = false;
2154
2155 /* Mark all desc done */
2156 list_for_each_entry(desc, &pch->submitted_list, node) {
2157 desc->status = FREE;
2158 dma_cookie_complete(&desc->txd);
2159 }
2160
2161 list_for_each_entry(desc, &pch->work_list , node) {
2162 desc->status = FREE;
2163 dma_cookie_complete(&desc->txd);
2164 }
2165
2166 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
2167 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2168 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2169 spin_unlock_irqrestore(&pch->lock, flags);
2170 pm_runtime_mark_last_busy(pl330->ddma.dev);
2171 if (power_down)
2172 pm_runtime_put_autosuspend(pl330->ddma.dev);
2173 pm_runtime_put_autosuspend(pl330->ddma.dev);
2174
2175 return 0;
2176}
2177
2178/*
2179 * We don't support the DMA_RESUME command because of hardware
2180 * limitations, so after pausing the channel we cannot restore it
2181 * to the active state. We have to terminate the channel and set
2182 * up the DMA transfer again. This pause feature was implemented
2183 * to allow safely reading the residue before channel termination.
2184 */
2185static int pl330_pause(struct dma_chan *chan)
2186{
2187 struct dma_pl330_chan *pch = to_pchan(chan);
2188 struct pl330_dmac *pl330 = pch->dmac;
2189 unsigned long flags;
2190
2191 pm_runtime_get_sync(pl330->ddma.dev);
2192 spin_lock_irqsave(&pch->lock, flags);
2193
2194 spin_lock(&pl330->lock);
2195 _stop(pch->thread);
2196 spin_unlock(&pl330->lock);
2197
2198 spin_unlock_irqrestore(&pch->lock, flags);
2199 pm_runtime_mark_last_busy(pl330->ddma.dev);
2200 pm_runtime_put_autosuspend(pl330->ddma.dev);
2201
2202 return 0;
2203}
2204
2205static void pl330_free_chan_resources(struct dma_chan *chan)
2206{
2207 struct dma_pl330_chan *pch = to_pchan(chan);
2208 struct pl330_dmac *pl330 = pch->dmac;
2209 unsigned long flags;
2210
2211 tasklet_kill(&pch->task);
2212
2213 pm_runtime_get_sync(pch->dmac->ddma.dev);
2214 spin_lock_irqsave(&pl330->lock, flags);
2215
2216 pl330_release_channel(pch->thread);
2217 pch->thread = NULL;
2218
2219 if (pch->cyclic)
2220 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
2221
2222 spin_unlock_irqrestore(&pl330->lock, flags);
2223 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2224 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2225 pl330_unprep_slave_fifo(pch);
2226}
2227
2228static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
2229 struct dma_pl330_desc *desc)
2230{
2231 struct pl330_thread *thrd = pch->thread;
2232 struct pl330_dmac *pl330 = pch->dmac;
2233 void __iomem *regs = thrd->dmac->base;
2234 u32 val, addr;
2235
2236 pm_runtime_get_sync(pl330->ddma.dev);
2237 val = addr = 0;
2238 if (desc->rqcfg.src_inc) {
2239 val = readl(regs + SA(thrd->id));
2240 addr = desc->px.src_addr;
2241 } else {
2242 val = readl(regs + DA(thrd->id));
2243 addr = desc->px.dst_addr;
2244 }
2245 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2246 pm_runtime_put_autosuspend(pl330->ddma.dev);
2247
2248 /* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
2249 if (!val)
2250 return 0;
2251
2252 return val - addr;
2253}
2254
2255static enum dma_status
2256pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2257 struct dma_tx_state *txstate)
2258{
2259 enum dma_status ret;
2260 unsigned long flags;
2261 struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
2262 struct dma_pl330_chan *pch = to_pchan(chan);
2263 unsigned int transferred, residual = 0;
2264
2265 ret = dma_cookie_status(chan, cookie, txstate);
2266
2267 if (!txstate)
2268 return ret;
2269
2270 if (ret == DMA_COMPLETE)
2271 goto out;
2272
2273 spin_lock_irqsave(&pch->lock, flags);
2274 spin_lock(&pch->thread->dmac->lock);
2275
2276 if (pch->thread->req_running != -1)
2277 running = pch->thread->req[pch->thread->req_running].desc;
2278
2279 last_enq = pch->thread->req[pch->thread->lstenq].desc;
2280
2281 /* Check in pending list */
2282 list_for_each_entry(desc, &pch->work_list, node) {
2283 if (desc->status == DONE)
2284 transferred = desc->bytes_requested;
2285 else if (running && desc == running)
2286 transferred =
2287 pl330_get_current_xferred_count(pch, desc);
2288		else if (desc->status == BUSY) {
2289 /*
2290 * Busy but not running means either just enqueued,
2291 * or finished and not yet marked done
2292 */
2293 if (desc == last_enq)
2294 transferred = 0;
2295 else
2296 transferred = desc->bytes_requested;
2297		} else
2298 transferred = 0;
2299 residual += desc->bytes_requested - transferred;
2300 if (desc->txd.cookie == cookie) {
2301 switch (desc->status) {
2302 case DONE:
2303 ret = DMA_COMPLETE;
2304 break;
2305 case PREP:
2306 case BUSY:
2307 ret = DMA_IN_PROGRESS;
2308 break;
2309 default:
2310 WARN_ON(1);
2311 }
2312 break;
2313 }
2314 if (desc->last)
2315 residual = 0;
2316 }
2317 spin_unlock(&pch->thread->dmac->lock);
2318 spin_unlock_irqrestore(&pch->lock, flags);
2319
2320out:
2321 dma_set_residue(txstate, residual);
2322
2323 return ret;
2324}
2325
2326static void pl330_issue_pending(struct dma_chan *chan)
2327{
2328 struct dma_pl330_chan *pch = to_pchan(chan);
2329 unsigned long flags;
2330
2331 spin_lock_irqsave(&pch->lock, flags);
2332 if (list_empty(&pch->work_list)) {
2333 /*
2334		 * Warn on nothing pending. An empty submitted_list may
2335		 * break our pm_runtime usage counter, as it is updated
2336		 * based on the work_list's emptiness status.
2337 */
2338 WARN_ON(list_empty(&pch->submitted_list));
2339 pch->active = true;
2340 pm_runtime_get_sync(pch->dmac->ddma.dev);
2341 }
2342 list_splice_tail_init(&pch->submitted_list, &pch->work_list);
2343 spin_unlock_irqrestore(&pch->lock, flags);
2344
2345 pl330_tasklet((unsigned long)pch);
2346}
2347
2348/*
2349 * We returned the last one of the circular list of descriptor(s)
2350 * from prep_xxx, so the argument to submit corresponds to the last
2351 * descriptor of the list.
2352 */
2353static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2354{
2355 struct dma_pl330_desc *desc, *last = to_desc(tx);
2356 struct dma_pl330_chan *pch = to_pchan(tx->chan);
2357 dma_cookie_t cookie;
2358 unsigned long flags;
2359
2360 spin_lock_irqsave(&pch->lock, flags);
2361
2362 /* Assign cookies to all nodes */
2363 while (!list_empty(&last->node)) {
2364 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
2365 if (pch->cyclic) {
2366 desc->txd.callback = last->txd.callback;
2367 desc->txd.callback_param = last->txd.callback_param;
2368 }
2369 desc->last = false;
2370
2371 dma_cookie_assign(&desc->txd);
2372
2373 list_move_tail(&desc->node, &pch->submitted_list);
2374 }
2375
2376 last->last = true;
2377 cookie = dma_cookie_assign(&last->txd);
2378 list_add_tail(&last->node, &pch->submitted_list);
2379 spin_unlock_irqrestore(&pch->lock, flags);
2380
2381 return cookie;
2382}
2383
2384static inline void _init_desc(struct dma_pl330_desc *desc)
2385{
2386 desc->rqcfg.swap = SWAP_NO;
2387 desc->rqcfg.scctl = CCTRL0;
2388 desc->rqcfg.dcctl = CCTRL0;
2389 desc->txd.tx_submit = pl330_tx_submit;
2390
2391 INIT_LIST_HEAD(&desc->node);
2392}
2393
2394/* Returns the number of descriptors added to the DMAC pool */
2395static int add_desc(struct list_head *pool, spinlock_t *lock,
2396 gfp_t flg, int count)
2397{
2398 struct dma_pl330_desc *desc;
2399 unsigned long flags;
2400 int i;
2401
2402 desc = kcalloc(count, sizeof(*desc), flg);
2403 if (!desc)
2404 return 0;
2405
2406 spin_lock_irqsave(lock, flags);
2407
2408 for (i = 0; i < count; i++) {
2409 _init_desc(&desc[i]);
2410 list_add_tail(&desc[i].node, pool);
2411 }
2412
2413 spin_unlock_irqrestore(lock, flags);
2414
2415 return count;
2416}
2417
2418static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
2419 spinlock_t *lock)
2420{
2421 struct dma_pl330_desc *desc = NULL;
2422 unsigned long flags;
2423
2424 spin_lock_irqsave(lock, flags);
2425
2426 if (!list_empty(pool)) {
2427 desc = list_entry(pool->next,
2428 struct dma_pl330_desc, node);
2429
2430 list_del_init(&desc->node);
2431
2432 desc->status = PREP;
2433 desc->txd.callback = NULL;
2434 }
2435
2436 spin_unlock_irqrestore(lock, flags);
2437
2438 return desc;
2439}
2440
2441static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
2442{
2443 struct pl330_dmac *pl330 = pch->dmac;
2444 u8 *peri_id = pch->chan.private;
2445 struct dma_pl330_desc *desc;
2446
2447 /* Pluck one desc from the pool of DMAC */
2448 desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);
2449
2450 /* If the DMAC pool is empty, alloc new */
2451 if (!desc) {
2452 DEFINE_SPINLOCK(lock);
2453 LIST_HEAD(pool);
2454
2455 if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
2456 return NULL;
2457
2458 desc = pluck_desc(&pool, &lock);
2459 WARN_ON(!desc || !list_empty(&pool));
2460 }
2461
2462 /* Initialize the descriptor */
2463 desc->pchan = pch;
2464 desc->txd.cookie = 0;
2465 async_tx_ack(&desc->txd);
2466
2467 desc->peri = peri_id ? pch->chan.chan_id : 0;
2468 desc->rqcfg.pcfg = &pch->dmac->pcfg;
2469
2470 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
2471
2472 return desc;
2473}
2474
2475static inline void fill_px(struct pl330_xfer *px,
2476 dma_addr_t dst, dma_addr_t src, size_t len)
2477{
2478 px->bytes = len;
2479 px->dst_addr = dst;
2480 px->src_addr = src;
2481}
2482
2483static struct dma_pl330_desc *
2484__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
2485 dma_addr_t src, size_t len)
2486{
2487 struct dma_pl330_desc *desc = pl330_get_desc(pch);
2488
2489 if (!desc) {
2490 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
2491 __func__, __LINE__);
2492 return NULL;
2493 }
2494
2495	/*
2496	 * Ideally we should look out for reqs bigger than
2497	 * those that can be programmed with 256 bytes of
2498	 * MC buffer, but considering a req size is seldom
2499	 * going to be word-unaligned and more than 200MB,
2500	 * we take it easy.
2501	 * Also, should the limit be reached we'd rather
2502	 * have the platform increase the MC buffer size
2503	 * than complicate this API driver.
2504	 */
2505 fill_px(&desc->px, dst, src, len);
2506
2507 return desc;
2508}
2509
2510/* Call after fixing burst size */
2511static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
2512{
2513 struct dma_pl330_chan *pch = desc->pchan;
2514 struct pl330_dmac *pl330 = pch->dmac;
2515 int burst_len;
2516
2517 burst_len = pl330->pcfg.data_bus_width / 8;
2518 burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
2519 burst_len >>= desc->rqcfg.brst_size;
2520
2521 /* src/dst_burst_len can't be more than 16 */
2522 if (burst_len > 16)
2523 burst_len = 16;
2524
2525 while (burst_len > 1) {
2526 if (!(len % (burst_len << desc->rqcfg.brst_size)))
2527 break;
2528 burst_len--;
2529 }
2530
2531 return burst_len;
2532}
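
/*
 * Worked example: a 64-bit bus (8 bytes), data_buf_dep = 16 and
 * num_chan = 8 give 8 * (16 / 8) = 16, i.e. 16 >> brst_size beats;
 * with brst_size = 2 that is 4. A len of 1024 is divisible by
 * 4 << 2 = 16 bytes, so the alignment loop leaves burst_len at 4.
 */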
2533
2534static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2535 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
2536 size_t period_len, enum dma_transfer_direction direction,
2537 unsigned long flags)
2538{
2539 struct dma_pl330_desc *desc = NULL, *first = NULL;
2540 struct dma_pl330_chan *pch = to_pchan(chan);
2541 struct pl330_dmac *pl330 = pch->dmac;
2542 unsigned int i;
2543 dma_addr_t dst;
2544 dma_addr_t src;
2545
2546 if (len % period_len != 0)
2547 return NULL;
2548
2549 if (!is_slave_direction(direction)) {
2550 dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
2551 __func__, __LINE__);
2552 return NULL;
2553 }
2554
2555 if (!pl330_prep_slave_fifo(pch, direction))
2556 return NULL;
2557
2558 for (i = 0; i < len / period_len; i++) {
2559 desc = pl330_get_desc(pch);
2560 if (!desc) {
2561 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
2562 __func__, __LINE__);
2563
2564 if (!first)
2565 return NULL;
2566
2567 spin_lock_irqsave(&pl330->pool_lock, flags);
2568
2569 while (!list_empty(&first->node)) {
2570 desc = list_entry(first->node.next,
2571 struct dma_pl330_desc, node);
2572 list_move_tail(&desc->node, &pl330->desc_pool);
2573 }
2574
2575 list_move_tail(&first->node, &pl330->desc_pool);
2576
2577 spin_unlock_irqrestore(&pl330->pool_lock, flags);
2578
2579 return NULL;
2580 }
2581
2582 switch (direction) {
2583 case DMA_MEM_TO_DEV:
2584 desc->rqcfg.src_inc = 1;
2585 desc->rqcfg.dst_inc = 0;
2586 src = dma_addr;
2587 dst = pch->fifo_dma;
2588 break;
2589 case DMA_DEV_TO_MEM:
2590 desc->rqcfg.src_inc = 0;
2591 desc->rqcfg.dst_inc = 1;
2592 src = pch->fifo_dma;
2593 dst = dma_addr;
2594 break;
2595 default:
2596 break;
2597 }
2598
2599 desc->rqtype = direction;
2600 desc->rqcfg.brst_size = pch->burst_sz;
2601 desc->rqcfg.brst_len = 1;
2602 desc->bytes_requested = period_len;
2603 fill_px(&desc->px, dst, src, period_len);
2604
2605 if (!first)
2606 first = desc;
2607 else
2608 list_add_tail(&desc->node, &first->node);
2609
2610 dma_addr += period_len;
2611 }
2612
2613 if (!desc)
2614 return NULL;
2615
2616 pch->cyclic = true;
2617 desc->txd.flags = flags;
2618
2619 return &desc->txd;
2620}
2621
2622static struct dma_async_tx_descriptor *
2623pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2624 dma_addr_t src, size_t len, unsigned long flags)
2625{
2626 struct dma_pl330_desc *desc;
2627 struct dma_pl330_chan *pch = to_pchan(chan);
2628 struct pl330_dmac *pl330;
2629 int burst;
2630
2631 if (unlikely(!pch || !len))
2632 return NULL;
2633
2634 pl330 = pch->dmac;
2635
2636 desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
2637 if (!desc)
2638 return NULL;
2639
2640 desc->rqcfg.src_inc = 1;
2641 desc->rqcfg.dst_inc = 1;
2642 desc->rqtype = DMA_MEM_TO_MEM;
2643
2644 /* Select max possible burst size */
2645 burst = pl330->pcfg.data_bus_width / 8;
2646
2647 /*
2648 * Make sure we use a burst size that aligns with all the memcpy
2649 * parameters because our DMA programming algorithm doesn't cope with
2650 * transfers which straddle an entry in the DMA device's MFIFO.
2651 */
2652 while ((src | dst | len) & (burst - 1))
2653 burst /= 2;
2654
2655	desc->rqcfg.brst_size = 0;
2656	while (burst != (1 << desc->rqcfg.brst_size))
2657		desc->rqcfg.brst_size++;
2658
2659	desc->rqcfg.brst_len = get_burst_len(desc, len);
2660
2661	/*
2662	 * If burst size is smaller than bus width then make sure we only
2663	 * transfer one at a time to avoid a burst straddling an MFIFO entry.
2664	 */
2665	if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
2666		desc->rqcfg.brst_len = 1;
2667 desc->bytes_requested = len;
2668
2669 desc->txd.flags = flags;
2670
2671 return &desc->txd;
2672}
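
/*
 * Example of the alignment walk above: on a 64-bit bus, burst starts
 * at 8; with src = 0x1004, dst = 0x2000 and len = 0x100,
 * (src | dst | len) has bit 2 set, so burst halves to 4 and
 * brst_size ends up as 2.
 */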
2673
2674static void __pl330_giveback_desc(struct pl330_dmac *pl330,
2675 struct dma_pl330_desc *first)
2676{
2677 unsigned long flags;
2678 struct dma_pl330_desc *desc;
2679
2680 if (!first)
2681 return;
2682
2683 spin_lock_irqsave(&pl330->pool_lock, flags);
2684
2685 while (!list_empty(&first->node)) {
2686 desc = list_entry(first->node.next,
2687 struct dma_pl330_desc, node);
2688 list_move_tail(&desc->node, &pl330->desc_pool);
2689 }
2690
2691 list_move_tail(&first->node, &pl330->desc_pool);
2692
2693 spin_unlock_irqrestore(&pl330->pool_lock, flags);
2694}
2695
2696static struct dma_async_tx_descriptor *
2697pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2698 unsigned int sg_len, enum dma_transfer_direction direction,
2699 unsigned long flg, void *context)
2700{
2701 struct dma_pl330_desc *first, *desc = NULL;
2702 struct dma_pl330_chan *pch = to_pchan(chan);
2703 struct scatterlist *sg;
2704 int i;
2705
2706 if (unlikely(!pch || !sgl || !sg_len))
2707 return NULL;
2708
2709 if (!pl330_prep_slave_fifo(pch, direction))
2710 return NULL;
2711
2712 first = NULL;
2713
2714 for_each_sg(sgl, sg, sg_len, i) {
2715
2716 desc = pl330_get_desc(pch);
2717 if (!desc) {
2718 struct pl330_dmac *pl330 = pch->dmac;
2719
2720 dev_err(pch->dmac->ddma.dev,
2721 "%s:%d Unable to fetch desc\n",
2722 __func__, __LINE__);
2723 __pl330_giveback_desc(pl330, first);
2724
2725 return NULL;
2726 }
2727
2728 if (!first)
2729 first = desc;
2730 else
2731 list_add_tail(&desc->node, &first->node);
2732
2733 if (direction == DMA_MEM_TO_DEV) {
2734 desc->rqcfg.src_inc = 1;
2735 desc->rqcfg.dst_inc = 0;
2736 fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
2737 sg_dma_len(sg));
2738 } else {
2739 desc->rqcfg.src_inc = 0;
2740 desc->rqcfg.dst_inc = 1;
2741 fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
2742 sg_dma_len(sg));
2743 }
2744
2745 desc->rqcfg.brst_size = pch->burst_sz;
2746 desc->rqcfg.brst_len = 1;
2747 desc->rqtype = direction;
2748 desc->bytes_requested = sg_dma_len(sg);
2749 }
2750
2751 /* Return the last desc in the chain */
2752 desc->txd.flags = flg;
2753 return &desc->txd;
2754}
2755
2756static irqreturn_t pl330_irq_handler(int irq, void *data)
2757{
2758 if (pl330_update(data))
2759 return IRQ_HANDLED;
2760 else
2761 return IRQ_NONE;
2762}
2763
2764#define PL330_DMA_BUSWIDTHS \
2765 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
2766 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
2767 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
2768 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
2769 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
2770
2771/*
2772 * Runtime PM callbacks are provided by the amba/bus.c driver.
2773 *
2774 * It is assumed here that IRQ-safe runtime PM is chosen in probe and that
2775 * the amba bus driver only disables/enables the clock in its PM callbacks.
2776 */
2777static int __maybe_unused pl330_suspend(struct device *dev)
2778{
2779 struct amba_device *pcdev = to_amba_device(dev);
2780
2781 pm_runtime_disable(dev);
2782
2783 if (!pm_runtime_status_suspended(dev)) {
2784 /* amba did not disable the clock */
2785 amba_pclk_disable(pcdev);
2786 }
2787 amba_pclk_unprepare(pcdev);
2788
2789 return 0;
2790}
2791
2792static int __maybe_unused pl330_resume(struct device *dev)
2793{
2794 struct amba_device *pcdev = to_amba_device(dev);
2795 int ret;
2796
2797 ret = amba_pclk_prepare(pcdev);
2798 if (ret)
2799 return ret;
2800
2801 if (!pm_runtime_status_suspended(dev))
2802 ret = amba_pclk_enable(pcdev);
2803
2804 pm_runtime_enable(dev);
2805
2806 return ret;
2807}
2808
2809static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
2810
2811static int
2812pl330_probe(struct amba_device *adev, const struct amba_id *id)
2813{
2814 struct pl330_config *pcfg;
2815 struct pl330_dmac *pl330;
2816 struct dma_pl330_chan *pch, *_p;
2817 struct dma_device *pd;
2818 struct resource *res;
2819 int i, ret, irq;
2820 int num_chan;
2821 struct device_node *np = adev->dev.of_node;
2822
2823 ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
2824 if (ret)
2825 return ret;
2826
2827 /* Allocate a new DMAC and its Channels */
2828 pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
2829 if (!pl330)
2830 return -ENOMEM;
2831
2832 pd = &pl330->ddma;
2833 pd->dev = &adev->dev;
2834
2835 pl330->mcbufsz = 0;
2836
2837	/* Pick up any DT-specified quirks */
2838 for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
2839 if (of_property_read_bool(np, of_quirks[i].quirk))
2840 pl330->quirks |= of_quirks[i].id;
2841
2842 res = &adev->res;
2843 pl330->base = devm_ioremap_resource(&adev->dev, res);
2844 if (IS_ERR(pl330->base))
2845 return PTR_ERR(pl330->base);
2846
2847 amba_set_drvdata(adev, pl330);
2848
2849 for (i = 0; i < AMBA_NR_IRQS; i++) {
2850 irq = adev->irq[i];
2851 if (irq) {
2852 ret = devm_request_irq(&adev->dev, irq,
2853 pl330_irq_handler, 0,
2854 dev_name(&adev->dev), pl330);
2855 if (ret)
2856 return ret;
2857 } else {
2858 break;
2859 }
2860 }
2861
2862 pcfg = &pl330->pcfg;
2863
2864 pcfg->periph_id = adev->periphid;
2865 ret = pl330_add(pl330);
2866 if (ret)
2867 return ret;
2868
2869 INIT_LIST_HEAD(&pl330->desc_pool);
2870 spin_lock_init(&pl330->pool_lock);
2871
2872 /* Create a descriptor pool of default size */
2873 if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
2874 GFP_KERNEL, NR_DEFAULT_DESC))
2875 dev_warn(&adev->dev, "unable to allocate desc\n");
2876
2877 INIT_LIST_HEAD(&pd->channels);
2878
2879 /* Initialize channel parameters */
2880 num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
2881
2882 pl330->num_peripherals = num_chan;
2883
2884	pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL);
2885 if (!pl330->peripherals) {
2886 ret = -ENOMEM;
2887 goto probe_err2;
2888 }
2889
2890 for (i = 0; i < num_chan; i++) {
2891 pch = &pl330->peripherals[i];
2892
2893 pch->chan.private = adev->dev.of_node;
2894 INIT_LIST_HEAD(&pch->submitted_list);
2895 INIT_LIST_HEAD(&pch->work_list);
2896 INIT_LIST_HEAD(&pch->completed_list);
2897 spin_lock_init(&pch->lock);
2898 pch->thread = NULL;
2899 pch->chan.device = pd;
2900 pch->dmac = pl330;
2901 pch->dir = DMA_NONE;
2902
2903 /* Add the channel to the DMAC list */
2904 list_add_tail(&pch->chan.device_node, &pd->channels);
2905 }
2906
2907 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
2908 if (pcfg->num_peri) {
2909 dma_cap_set(DMA_SLAVE, pd->cap_mask);
2910 dma_cap_set(DMA_CYCLIC, pd->cap_mask);
2911 dma_cap_set(DMA_PRIVATE, pd->cap_mask);
2912 }
2913
2914 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
2915 pd->device_free_chan_resources = pl330_free_chan_resources;
2916 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
2917 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
2918 pd->device_tx_status = pl330_tx_status;
2919 pd->device_prep_slave_sg = pl330_prep_slave_sg;
2920 pd->device_config = pl330_config;
2921 pd->device_pause = pl330_pause;
2922 pd->device_terminate_all = pl330_terminate_all;
2923 pd->device_issue_pending = pl330_issue_pending;
2924 pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
2925 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
2926 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2927 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2928 pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
2929 1 : PL330_MAX_BURST);
2930
2931 ret = dma_async_device_register(pd);
2932 if (ret) {
2933 dev_err(&adev->dev, "unable to register DMAC\n");
2934 goto probe_err3;
2935 }
2936
2937 if (adev->dev.of_node) {
2938 ret = of_dma_controller_register(adev->dev.of_node,
2939 of_dma_pl330_xlate, pl330);
2940 if (ret) {
2941 dev_err(&adev->dev,
2942 "unable to register DMA to the generic DT DMA helpers\n");
2943 }
2944 }
2945
2946 adev->dev.dma_parms = &pl330->dma_parms;
2947
2948 /*
2949	 * This is the limit for transfers with a buswidth of 1; larger
2950	 * buswidths will have larger limits.
2951 */
2952 ret = dma_set_max_seg_size(&adev->dev, 1900800);
2953 if (ret)
2954 dev_err(&adev->dev, "unable to set the seg size\n");
2955
2956
2957 dev_info(&adev->dev,
2958 "Loaded driver for PL330 DMAC-%x\n", adev->periphid);
2959 dev_info(&adev->dev,
2960 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
2961 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
2962 pcfg->num_peri, pcfg->num_events);
2963
2964 pm_runtime_irq_safe(&adev->dev);
2965 pm_runtime_use_autosuspend(&adev->dev);
2966 pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
2967 pm_runtime_mark_last_busy(&adev->dev);
2968 pm_runtime_put_autosuspend(&adev->dev);
2969
2970 return 0;
2971probe_err3:
2972 /* Idle the DMAC */
2973 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
2974 chan.device_node) {
2975
2976 /* Remove the channel */
2977 list_del(&pch->chan.device_node);
2978
2979 /* Flush the channel */
2980 if (pch->thread) {
2981 pl330_terminate_all(&pch->chan);
2982 pl330_free_chan_resources(&pch->chan);
2983 }
2984 }
2985probe_err2:
2986 pl330_del(pl330);
2987
2988 return ret;
2989}
2990
2991static int pl330_remove(struct amba_device *adev)
2992{
2993 struct pl330_dmac *pl330 = amba_get_drvdata(adev);
2994 struct dma_pl330_chan *pch, *_p;
2995 int i, irq;
2996
2997 pm_runtime_get_noresume(pl330->ddma.dev);
2998
2999 if (adev->dev.of_node)
3000 of_dma_controller_free(adev->dev.of_node);
3001
3002 for (i = 0; i < AMBA_NR_IRQS; i++) {
3003 irq = adev->irq[i];
3004 if (irq)
3005 devm_free_irq(&adev->dev, irq, pl330);
3006 }
3007
3008 dma_async_device_unregister(&pl330->ddma);
3009
3010 /* Idle the DMAC */
3011 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
3012 chan.device_node) {
3013
3014 /* Remove the channel */
3015 list_del(&pch->chan.device_node);
3016
3017 /* Flush the channel */
3018 if (pch->thread) {
3019 pl330_terminate_all(&pch->chan);
3020 pl330_free_chan_resources(&pch->chan);
3021 }
3022 }
3023
3024 pl330_del(pl330);
3025
3026 return 0;
3027}
3028
3029static const struct amba_id pl330_ids[] = {
3030 {
3031 .id = 0x00041330,
3032 .mask = 0x000fffff,
3033 },
3034 { 0, 0 },
3035};
3036
3037MODULE_DEVICE_TABLE(amba, pl330_ids);
3038
3039static struct amba_driver pl330_driver = {
3040 .drv = {
3041 .owner = THIS_MODULE,
3042 .name = "dma-pl330",
3043 .pm = &pl330_pm,
3044 },
3045 .id_table = pl330_ids,
3046 .probe = pl330_probe,
3047 .remove = pl330_remove,
3048};
3049
3050module_amba_driver(pl330_driver);
3051
3052MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
3053MODULE_DESCRIPTION("API Driver for PL330 DMAC");
3054MODULE_LICENSE("GPL");