1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Copyright 2013-2014 Freescale Semiconductor, Inc.
4 * Copyright 2018 Angelo Dureghello <angelo@sysam.it>
5 */
6#ifndef _FSL_EDMA_COMMON_H_
7#define _FSL_EDMA_COMMON_H_
8
9#include <linux/dma-direction.h>
10#include <linux/platform_device.h>
11#include "virt-dma.h"
12
/* eDMA Control Register (CR) bit flags. */
#define EDMA_CR_EDBG		BIT(1)
#define EDMA_CR_ERCA		BIT(2)
#define EDMA_CR_ERGA		BIT(3)
#define EDMA_CR_HOE		BIT(4)
#define EDMA_CR_HALT		BIT(5)
#define EDMA_CR_CLM		BIT(6)
#define EDMA_CR_EMLM		BIT(7)
#define EDMA_CR_ECX		BIT(16)
#define EDMA_CR_CX		BIT(17)

/*
 * SEEI/CEEI/CINT/CERR take a 5-bit channel number in their low bits;
 * mask the argument so out-of-range values cannot spill into other bits.
 */
#define EDMA_SEEI_SEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CEEI_CEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CINT_CINT(x)	((x) & GENMASK(4, 0))
#define EDMA_CERR_CERR(x)	((x) & GENMASK(4, 0))
27
/* TCD ATTR field: destination/source transfer size and address modulo. */
#define EDMA_TCD_ATTR_DSIZE(x)		(((x) & GENMASK(2, 0)))
#define EDMA_TCD_ATTR_DMOD(x)		(((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)		(((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x)		(((x) & GENMASK(4, 0)) << 11)

/* Major-loop iteration counts (CITER/BITER) are 15 bits wide. */
#define EDMA_TCD_ITER_MASK		GENMASK(14, 0)
#define EDMA_TCD_CITER_CITER(x)		((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_BITER_BITER(x)		((x) & EDMA_TCD_ITER_MASK)

/* TCD Control/Status (CSR) bit flags. */
#define EDMA_TCD_CSR_START		BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
#define EDMA_TCD_CSR_INT_HALF		BIT(2)
#define EDMA_TCD_CSR_D_REQ		BIT(3)
#define EDMA_TCD_CSR_E_SG		BIT(4)
#define EDMA_TCD_CSR_E_LINK		BIT(5)
#define EDMA_TCD_CSR_ACTIVE		BIT(6)
#define EDMA_TCD_CSR_DONE		BIT(7)
45
/*
 * eDMA v3 TCD NBYTES field in minor-loop-offset (MLOFF) mode: the low
 * 10 bits carry the byte count, bits 10..29 the minor-loop offset, and
 * the top two bits enable applying that offset to the destination
 * (DMLOE) / source (SMLOE) address.
 */
#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x) ((x) & GENMASK(9, 0))
/* Argument parenthesized so expression arguments shift as a whole. */
#define EDMA_V3_TCD_NBYTES_MLOFF(x) ((x) << 10)
/* BIT() avoids UB from left-shifting a signed 1 into the sign bit. */
#define EDMA_V3_TCD_NBYTES_DMLOE BIT(30)
#define EDMA_V3_TCD_NBYTES_SMLOE BIT(31)
50
/* DMAMUX channel-config register: enable bit plus 6-bit request source. */
#define EDMAMUX_CHCFG_DIS		0x0
#define EDMAMUX_CHCFG_ENBL		0x80
#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)

/* Number of DMAMUX instances tracked per engine. */
#define DMAMUX_NR	2

/* NOTE(review): presumably the TCD block offset in the register map. */
#define EDMA_TCD	0x1000

/* Transfer widths advertised to the dmaengine core. */
#define FSL_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

/* eDMA v3 per-channel SBR and CSR register bits. */
#define EDMA_V3_CH_SBR_RD	BIT(22)
#define EDMA_V3_CH_SBR_WR	BIT(21)
#define EDMA_V3_CH_CSR_ERQ	BIT(0)
#define EDMA_V3_CH_CSR_EARQ	BIT(1)
#define EDMA_V3_CH_CSR_EEI	BIT(2)
#define EDMA_V3_CH_CSR_DONE	BIT(30)
#define EDMA_V3_CH_CSR_ACTIVE	BIT(31)
71
/* Power-management state of the eDMA device. */
enum fsl_edma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};
76
/*
 * Hardware Transfer Control Descriptor (TCD) with 32-bit addresses.
 * Fields are little-endian; the accessors in this header handle
 * big-endian IP variants.
 */
struct fsl_edma_hw_tcd {
	__le32	saddr;		/* source address */
	__le16	soff;		/* source address offset applied per read */
	__le16	attr;		/* transfer attributes (EDMA_TCD_ATTR_*) */
	__le32	nbytes;		/* minor-loop byte count */
	__le32	slast;		/* source adjustment at major-loop completion */
	__le32	daddr;		/* destination address */
	__le16	doff;		/* destination address offset applied per write */
	__le16	citer;		/* current major iteration count */
	__le32	dlast_sga;	/* dest adjustment or next scatter/gather TCD */
	__le16	csr;		/* control/status (EDMA_TCD_CSR_*) */
	__le16	biter;		/* beginning major iteration count */
};
90
/*
 * Per-channel register block for the eDMA v3/v4 split-register layout.
 * The TCD registers follow directly after the channel registers, so
 * code holding a TCD pointer can recover this block via container_of()
 * (see edma_readl_chreg()/edma_writel_chreg()).
 */
struct fsl_edma3_ch_reg {
	__le32	ch_csr;		/* channel control/status (EDMA_V3_CH_CSR_*) */
	__le32	ch_es;		/* channel error status */
	__le32	ch_int;		/* channel interrupt status */
	__le32	ch_sbr;		/* system bus attributes (EDMA_V3_CH_SBR_*) */
	__le32	ch_pri;		/* channel priority */
	__le32	ch_mux;		/* channel mux configuration */
	__le32	ch_mattr;	/* edma4, reserved for edma3 */
	__le32	ch_reserved;
	struct fsl_edma_hw_tcd	tcd;	/* hardware TCD registers */
} __packed;
102
/*
 * Pointers into the memory-mapped global eDMA registers.  Valid for
 * both the 32-channel (v32) and 64-channel (v64) register layouts;
 * the *h variants only exist on v64 and the *l variants double as the
 * sole register on v32.
 */
struct edma_regs {
	void __iomem *cr;	/* control register */
	void __iomem *es;	/* error status */
	void __iomem *erqh;
	void __iomem *erql;	/* aka erq on v32 */
	void __iomem *eeih;
	void __iomem *eeil;	/* aka eei on v32 */
	void __iomem *seei;	/* set enable-error-interrupt */
	void __iomem *ceei;	/* clear enable-error-interrupt */
	void __iomem *serq;	/* set enable-request */
	void __iomem *cerq;	/* clear enable-request */
	void __iomem *cint;	/* clear interrupt */
	void __iomem *cerr;	/* clear error */
	void __iomem *ssrt;	/* set start bit */
	void __iomem *cdne;	/* clear done bit */
	void __iomem *inth;
	void __iomem *intl;
	void __iomem *errh;
	void __iomem *errl;
};
126
/* One software-owned TCD: CPU virtual pointer plus its bus address. */
struct fsl_edma_sw_tcd {
	dma_addr_t			ptcd;	/* DMA (bus) address of the TCD */
	struct fsl_edma_hw_tcd		*vtcd;	/* CPU-visible TCD */
};
131
/* Driver state for one eDMA channel. */
struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;		/* set to DMA_ERROR on channel error */
	enum fsl_edma_pm_state		pm_state;
	bool				idle;		/* true when no transfer is running */
	u32				slave_id;	/* presumably DMAMUX request source — verify against callers */
	struct fsl_edma_engine		*edma;		/* owning engine */
	struct fsl_edma_desc		*edesc;		/* descriptor currently in flight */
	struct dma_slave_config		cfg;
	u32				attr;
	bool				is_sw;
	struct dma_pool			*tcd_pool;	/* allocator for hardware TCDs */
	dma_addr_t			dma_dev_addr;
	u32				dma_dev_size;
	enum dma_data_direction		dma_dir;
	char				chan_name[32];
	struct fsl_edma_hw_tcd __iomem	*tcd;		/* this channel's TCD registers */
	u32				real_count;
	struct work_struct		issue_worker;
	struct platform_device		*pdev;
	struct device			*pd_dev;	/* power-domain device, if used */
	u32				srcid;
	struct clk			*clk;
	int				priority;
	int				hw_chanid;
	int				txirq;
	bool				is_rxchan;
	bool				is_remote;
	bool				is_multi_fifo;
};
162
/* One virt-dma transfer descriptor: a chain of n_tcds hardware TCDs. */
struct fsl_edma_desc {
	struct virt_dma_desc		vdesc;
	struct fsl_edma_chan		*echan;		/* owning channel */
	bool				iscyclic;	/* cyclic transfer (restarts itself) */
	enum dma_transfer_direction	dirn;
	unsigned int			n_tcds;		/* number of entries in tcd[] */
	struct fsl_edma_sw_tcd		tcd[];		/* flexible array of TCDs */
};
171
/*
 * fsl_edma_drvdata::flags — per-SoC/IP-variant feature and quirk bits,
 * queried at runtime via fsl_edma_drvflags().
 */
#define FSL_EDMA_DRV_HAS_DMACLK		BIT(0)
#define FSL_EDMA_DRV_MUX_SWAP		BIT(1)
#define FSL_EDMA_DRV_CONFIG32		BIT(2)
#define FSL_EDMA_DRV_WRAP_IO		BIT(3)
#define FSL_EDMA_DRV_EDMA64		BIT(4)
#define FSL_EDMA_DRV_HAS_PD		BIT(5)
#define FSL_EDMA_DRV_HAS_CHCLK		BIT(6)
#define FSL_EDMA_DRV_HAS_CHMUX		BIT(7)
/* imx8 QM audio edma remote local swapped */
#define FSL_EDMA_DRV_QUIRK_SWAPPED	BIT(8)
/* control and status register is in tcd address space, edma3 reg layout */
#define FSL_EDMA_DRV_SPLIT_REG		BIT(9)
#define FSL_EDMA_DRV_BUS_8BYTE		BIT(10)
#define FSL_EDMA_DRV_DEV_TO_DEV		BIT(11)
#define FSL_EDMA_DRV_ALIGN_64BYTE	BIT(12)
/* Need clean CHn_CSR DONE before enable TCD's ESG */
#define FSL_EDMA_DRV_CLEAR_DONE_E_SG	BIT(13)
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK	BIT(14)

/* Composite flag sets for the eDMA3 and eDMA4 IP generations. */
#define FSL_EDMA_DRV_EDMA3	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_SG |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)

/* eDMA4 does not need the E_SG done-clear quirk. */
#define FSL_EDMA_DRV_EDMA4	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
204
/* Static, per-compatible configuration selected at probe time. */
struct fsl_edma_drvdata {
	u32	dmamuxs; /* only used before v3 */
	u32	chreg_off;	/* NOTE(review): presumably offset of per-channel regs — verify */
	u32	chreg_space_sz;	/* NOTE(review): presumably stride between channel reg blocks */
	u32	flags;		/* FSL_EDMA_DRV_* feature bits */
	int	(*setup_irq)(struct platform_device *pdev,
			     struct fsl_edma_engine *fsl_edma);
};
213
/* Top-level driver state: one dmaengine device plus all its channels. */
struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;		/* eDMA register base */
	void __iomem		*muxbase[DMAMUX_NR];	/* DMAMUX register bases */
	struct clk		*muxclk[DMAMUX_NR];
	struct clk		*dmaclk;
	struct clk		*chclk;
	struct mutex		fsl_edma_mutex;
	const struct fsl_edma_drvdata *drvdata;		/* per-variant config */
	u32			n_chans;		/* entries in chans[] */
	int			txirq;
	int			errirq;
	bool			big_endian;		/* register endianness of the IP */
	struct edma_regs	regs;
	u64			chan_masked;		/* NOTE(review): presumably bitmask of unusable channels */
	struct fsl_edma_chan	chans[] __counted_by(n_chans);
};
231
/*
 * TCD register accessors: dispatch on sizeof(field) so 32-bit fields
 * use edma_readl/edma_writel and 16-bit fields edma_readw/edma_writew.
 */
#define edma_read_tcdreg(chan, __name) \
(sizeof(chan->tcd->__name) == sizeof(u32) ? \
	edma_readl(chan->edma, &chan->tcd->__name) : \
	edma_readw(chan->edma, &chan->tcd->__name))

#define edma_write_tcdreg(chan, val, __name) \
(sizeof(chan->tcd->__name) == sizeof(u32) ? \
	edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) : \
	edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))

/*
 * Channel-register accessors: the channel registers precede the TCD in
 * struct fsl_edma3_ch_reg, so recover the block from chan->tcd with
 * container_of() and access the named member.
 */
#define edma_readl_chreg(chan, __name) \
	edma_readl(chan->edma, \
		   (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))

#define edma_writel_chreg(chan, val, __name) \
	edma_writel(chan->edma, val, \
		    (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
249
250/*
251 * R/W functions for big- or little-endian registers:
252 * The eDMA controller's endian is independent of the CPU core's endian.
253 * For the big-endian IP module, the offset for 8-bit or 16-bit registers
254 * should also be swapped opposite to that in little-endian IP.
255 */
256static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
257{
258 if (edma->big_endian)
259 return ioread32be(addr);
260 else
261 return ioread32(addr);
262}
263
264static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
265{
266 if (edma->big_endian)
267 return ioread16be(addr);
268 else
269 return ioread16(addr);
270}
271
272static inline void edma_writeb(struct fsl_edma_engine *edma,
273 u8 val, void __iomem *addr)
274{
275 /* swap the reg offset for these in big-endian mode */
276 if (edma->big_endian)
277 iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
278 else
279 iowrite8(val, addr);
280}
281
282static inline void edma_writew(struct fsl_edma_engine *edma,
283 u16 val, void __iomem *addr)
284{
285 /* swap the reg offset for these in big-endian mode */
286 if (edma->big_endian)
287 iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
288 else
289 iowrite16(val, addr);
290}
291
292static inline void edma_writel(struct fsl_edma_engine *edma,
293 u32 val, void __iomem *addr)
294{
295 if (edma->big_endian)
296 iowrite32be(val, addr);
297 else
298 iowrite32(val, addr);
299}
300
/* Convert a dmaengine channel into its containing fsl_edma_chan. */
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
305
/* Feature-flag bits (FSL_EDMA_DRV_*) of this channel's engine variant. */
static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
{
	return fsl_chan->edma->drvdata->flags;
}
310
/* Convert a virt-dma descriptor into its containing fsl_edma_desc. */
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}
315
316static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
317{
318 fsl_chan->status = DMA_ERROR;
319 fsl_chan->idle = true;
320}
321
322void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
323void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
324void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
325 unsigned int slot, bool enable);
326void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
327int fsl_edma_terminate_all(struct dma_chan *chan);
328int fsl_edma_pause(struct dma_chan *chan);
329int fsl_edma_resume(struct dma_chan *chan);
330int fsl_edma_slave_config(struct dma_chan *chan,
331 struct dma_slave_config *cfg);
332enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
333 dma_cookie_t cookie, struct dma_tx_state *txstate);
334struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
335 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
336 size_t period_len, enum dma_transfer_direction direction,
337 unsigned long flags);
338struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
339 struct dma_chan *chan, struct scatterlist *sgl,
340 unsigned int sg_len, enum dma_transfer_direction direction,
341 unsigned long flags, void *context);
342struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(
343 struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
344 size_t len, unsigned long flags);
345void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
346void fsl_edma_issue_pending(struct dma_chan *chan);
347int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
348void fsl_edma_free_chan_resources(struct dma_chan *chan);
349void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
350void fsl_edma_setup_regs(struct fsl_edma_engine *edma);
351
352#endif /* _FSL_EDMA_COMMON_H_ */
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Copyright 2013-2014 Freescale Semiconductor, Inc.
4 * Copyright 2018 Angelo Dureghello <angelo@sysam.it>
5 */
6#ifndef _FSL_EDMA_COMMON_H_
7#define _FSL_EDMA_COMMON_H_
8
9#include <linux/dma-direction.h>
10#include <linux/platform_device.h>
11#include "virt-dma.h"
12
13#define EDMA_CR_EDBG BIT(1)
14#define EDMA_CR_ERCA BIT(2)
15#define EDMA_CR_ERGA BIT(3)
16#define EDMA_CR_HOE BIT(4)
17#define EDMA_CR_HALT BIT(5)
18#define EDMA_CR_CLM BIT(6)
19#define EDMA_CR_EMLM BIT(7)
20#define EDMA_CR_ECX BIT(16)
21#define EDMA_CR_CX BIT(17)
22
23#define EDMA_SEEI_SEEI(x) ((x) & GENMASK(4, 0))
24#define EDMA_CEEI_CEEI(x) ((x) & GENMASK(4, 0))
25#define EDMA_CINT_CINT(x) ((x) & GENMASK(4, 0))
26#define EDMA_CERR_CERR(x) ((x) & GENMASK(4, 0))
27
28#define EDMA_TCD_ATTR_DSIZE(x) (((x) & GENMASK(2, 0)))
29#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
30#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
31#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
32
33#define EDMA_TCD_ITER_MASK GENMASK(14, 0)
34#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK)
35#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK)
36
37#define EDMA_TCD_CSR_START BIT(0)
38#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
39#define EDMA_TCD_CSR_INT_HALF BIT(2)
40#define EDMA_TCD_CSR_D_REQ BIT(3)
41#define EDMA_TCD_CSR_E_SG BIT(4)
42#define EDMA_TCD_CSR_E_LINK BIT(5)
43#define EDMA_TCD_CSR_ACTIVE BIT(6)
44#define EDMA_TCD_CSR_DONE BIT(7)
45
/*
 * eDMA v3 TCD NBYTES field in minor-loop-offset (MLOFF) mode: the low
 * 10 bits carry the byte count, bits 10..29 the minor-loop offset, and
 * the top two bits enable applying that offset to the destination
 * (DMLOE) / source (SMLOE) address.
 */
#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x) ((x) & GENMASK(9, 0))
/* Argument parenthesized so expression arguments shift as a whole. */
#define EDMA_V3_TCD_NBYTES_MLOFF(x) ((x) << 10)
/* BIT() avoids UB from left-shifting a signed 1 into the sign bit. */
#define EDMA_V3_TCD_NBYTES_DMLOE BIT(30)
#define EDMA_V3_TCD_NBYTES_SMLOE BIT(31)
50
51#define EDMAMUX_CHCFG_DIS 0x0
52#define EDMAMUX_CHCFG_ENBL 0x80
53#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
54
55#define DMAMUX_NR 2
56
57#define EDMA_TCD 0x1000
58
59#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
60 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
61 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
62 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
63
64#define EDMA_V3_CH_SBR_RD BIT(22)
65#define EDMA_V3_CH_SBR_WR BIT(21)
66#define EDMA_V3_CH_CSR_ERQ BIT(0)
67#define EDMA_V3_CH_CSR_EARQ BIT(1)
68#define EDMA_V3_CH_CSR_EEI BIT(2)
69#define EDMA_V3_CH_CSR_DONE BIT(30)
70#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
71
72enum fsl_edma_pm_state {
73 RUNNING = 0,
74 SUSPENDED,
75};
76
77struct fsl_edma_hw_tcd {
78 __le32 saddr;
79 __le16 soff;
80 __le16 attr;
81 __le32 nbytes;
82 __le32 slast;
83 __le32 daddr;
84 __le16 doff;
85 __le16 citer;
86 __le32 dlast_sga;
87 __le16 csr;
88 __le16 biter;
89};
90
/*
 * Hardware TCD variant with 64-bit addresses, selected at runtime by
 * the FSL_EDMA_DRV_TCD64 flag.  Note the field ordering differs from
 * struct fsl_edma_hw_tcd (dlast_sga precedes doff/citer here).
 */
struct fsl_edma_hw_tcd64 {
	__le64	saddr;		/* source address */
	__le16	soff;		/* source address offset applied per read */
	__le16	attr;		/* transfer attributes (EDMA_TCD_ATTR_*) */
	__le32	nbytes;		/* minor-loop byte count */
	__le64	slast;		/* source adjustment at major-loop completion */
	__le64	daddr;		/* destination address */
	__le64	dlast_sga;	/* dest adjustment or next scatter/gather TCD */
	__le16	doff;		/* destination address offset applied per write */
	__le16	citer;		/* current major iteration count */
	__le16	csr;		/* control/status (EDMA_TCD_CSR_*) */
	__le16	biter;		/* beginning major iteration count */
} __packed;
104
/*
 * Per-channel register block for the eDMA v3/v4 split-register layout.
 * The trailing union overlays the 32-bit-address and 64-bit-address
 * TCD layouts; which one applies depends on FSL_EDMA_DRV_TCD64.
 */
struct fsl_edma3_ch_reg {
	__le32	ch_csr;		/* channel control/status (EDMA_V3_CH_CSR_*) */
	__le32	ch_es;		/* channel error status */
	__le32	ch_int;		/* channel interrupt status */
	__le32	ch_sbr;		/* system bus attributes (EDMA_V3_CH_SBR_*) */
	__le32	ch_pri;		/* channel priority */
	__le32	ch_mux;		/* channel mux configuration */
	__le32	ch_mattr;	/* edma4, reserved for edma3 */
	__le32	ch_reserved;
	union {
		struct fsl_edma_hw_tcd		tcd;	/* 32-bit-address TCD */
		struct fsl_edma_hw_tcd64	tcd64;	/* 64-bit-address TCD */
	};
} __packed;
119
120/*
121 * These are iomem pointers, for both v32 and v64.
122 */
123struct edma_regs {
124 void __iomem *cr;
125 void __iomem *es;
126 void __iomem *erqh;
127 void __iomem *erql; /* aka erq on v32 */
128 void __iomem *eeih;
129 void __iomem *eeil; /* aka eei on v32 */
130 void __iomem *seei;
131 void __iomem *ceei;
132 void __iomem *serq;
133 void __iomem *cerq;
134 void __iomem *cint;
135 void __iomem *cerr;
136 void __iomem *ssrt;
137 void __iomem *cdne;
138 void __iomem *inth;
139 void __iomem *intl;
140 void __iomem *errh;
141 void __iomem *errl;
142};
143
144struct fsl_edma_sw_tcd {
145 dma_addr_t ptcd;
146 void *vtcd;
147};
148
/* Driver state for one eDMA channel. */
struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;		/* set to DMA_ERROR on channel error */
	enum fsl_edma_pm_state		pm_state;
	struct fsl_edma_engine		*edma;		/* owning engine */
	struct fsl_edma_desc		*edesc;		/* descriptor currently in flight */
	struct dma_slave_config		cfg;
	u32				attr;
	bool				is_sw;
	struct dma_pool			*tcd_pool;	/* allocator for hardware TCDs */
	dma_addr_t			dma_dev_addr;
	u32				dma_dev_size;
	enum dma_data_direction		dma_dir;
	char				chan_name[32];
	void __iomem			*tcd;		/* TCD regs; actual layout picked by FSL_EDMA_DRV_TCD64 */
	void __iomem			*mux_addr;	/* per-channel mux register */
	u32				real_count;
	struct work_struct		issue_worker;
	struct platform_device		*pdev;
	struct device			*pd_dev;	/* power-domain device, if used */
	struct device_link		*pd_dev_link;	/* link keeping pd_dev powered */
	u32				srcid;
	struct clk			*clk;
	int				priority;
	int				hw_chanid;
	int				txirq;
	irqreturn_t			(*irq_handler)(int irq, void *dev_id);
	bool				is_rxchan;
	bool				is_remote;
	bool				is_multi_fifo;
};
180
181struct fsl_edma_desc {
182 struct virt_dma_desc vdesc;
183 struct fsl_edma_chan *echan;
184 bool iscyclic;
185 enum dma_transfer_direction dirn;
186 unsigned int n_tcds;
187 struct fsl_edma_sw_tcd tcd[];
188};
189
190#define FSL_EDMA_DRV_HAS_DMACLK BIT(0)
191#define FSL_EDMA_DRV_MUX_SWAP BIT(1)
192#define FSL_EDMA_DRV_CONFIG32 BIT(2)
193#define FSL_EDMA_DRV_WRAP_IO BIT(3)
194#define FSL_EDMA_DRV_EDMA64 BIT(4)
195#define FSL_EDMA_DRV_HAS_PD BIT(5)
196#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
197#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
198#define FSL_EDMA_DRV_MEM_REMOTE BIT(8)
199/* control and status register is in tcd address space, edma3 reg layout */
200#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
201#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
202#define FSL_EDMA_DRV_DEV_TO_DEV BIT(11)
203#define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12)
204/* Need clean CHn_CSR DONE before enable TCD's ESG */
205#define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13)
206/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
207#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
208#define FSL_EDMA_DRV_TCD64 BIT(15)
209
210#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
211 FSL_EDMA_DRV_BUS_8BYTE | \
212 FSL_EDMA_DRV_DEV_TO_DEV | \
213 FSL_EDMA_DRV_ALIGN_64BYTE | \
214 FSL_EDMA_DRV_CLEAR_DONE_E_SG | \
215 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
216
217#define FSL_EDMA_DRV_EDMA4 (FSL_EDMA_DRV_SPLIT_REG | \
218 FSL_EDMA_DRV_BUS_8BYTE | \
219 FSL_EDMA_DRV_DEV_TO_DEV | \
220 FSL_EDMA_DRV_ALIGN_64BYTE | \
221 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
222
/* Static, per-compatible configuration selected at probe time. */
struct fsl_edma_drvdata {
	u32	dmamuxs; /* only used before v3 */
	u32	chreg_off;	/* NOTE(review): presumably offset of per-channel regs — verify */
	u32	chreg_space_sz;	/* NOTE(review): presumably stride between channel reg blocks */
	u32	flags;		/* FSL_EDMA_DRV_* feature bits */
	u32	mux_off;	/* channel mux register offset */
	u32	mux_skip;	/* how much skip for each channel */
	int	(*setup_irq)(struct platform_device *pdev,
			     struct fsl_edma_engine *fsl_edma);
};
233
234struct fsl_edma_engine {
235 struct dma_device dma_dev;
236 void __iomem *membase;
237 void __iomem *muxbase[DMAMUX_NR];
238 struct clk *muxclk[DMAMUX_NR];
239 struct clk *dmaclk;
240 struct mutex fsl_edma_mutex;
241 const struct fsl_edma_drvdata *drvdata;
242 u32 n_chans;
243 int txirq;
244 int errirq;
245 bool big_endian;
246 struct edma_regs regs;
247 u64 chan_masked;
248 struct fsl_edma_chan chans[] __counted_by(n_chans);
249};
250
/* Feature-flag bits (FSL_EDMA_DRV_*) of this channel's engine variant. */
static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
{
	return fsl_chan->edma->drvdata->flags;
}
255
/*
 * TCD-layout-aware register accessors.  Variants with the
 * FSL_EDMA_DRV_TCD64 flag use struct fsl_edma_hw_tcd64 (64-bit
 * addresses); all others use struct fsl_edma_hw_tcd.  _Generic selects
 * the MMIO helper matching the declared width of the field.
 */
#define edma_read_tcdreg_c(chan, _tcd, __name)				\
_Generic(((_tcd)->__name),						\
	__iomem __le64 : edma_readq(chan->edma, &(_tcd)->__name),	\
	__iomem __le32 : edma_readl(chan->edma, &(_tcd)->__name),	\
	__iomem __le16 : edma_readw(chan->edma, &(_tcd)->__name)	\
	)

/* Read a TCD register, picking the TCD layout from the drvdata flags. */
#define edma_read_tcdreg(chan, __name)					\
((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ?			\
	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd64 __iomem *)chan->tcd), __name) : \
	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \
)

#define edma_write_tcdreg_c(chan, _tcd, _val, __name)			\
_Generic((_tcd->__name),						\
	__iomem __le64 : edma_writeq(chan->edma, (u64 __force)(_val), &_tcd->__name), \
	__iomem __le32 : edma_writel(chan->edma, (u32 __force)(_val), &_tcd->__name), \
	__iomem __le16 : edma_writew(chan->edma, (u16 __force)(_val), &_tcd->__name), \
	__iomem u8 : edma_writeb(chan->edma, _val, &_tcd->__name)	\
	)

/* Write a TCD register, picking the TCD layout from the drvdata flags. */
#define edma_write_tcdreg(chan, val, __name)				\
do {									\
	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
									\
	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)		\
		edma_write_tcdreg_c(chan, tcd64_r, val, __name);	\
	else								\
		edma_write_tcdreg_c(chan, tcd_r, val, __name);		\
} while (0)

/* Copy one field of an in-memory TCD into the matching TCD register. */
#define edma_cp_tcd_to_reg(chan, __tcd, __name)				\
do {									\
	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
	struct fsl_edma_hw_tcd64 *tcd64_m = (struct fsl_edma_hw_tcd64 *)__tcd; \
	struct fsl_edma_hw_tcd *tcd_m = (struct fsl_edma_hw_tcd *)__tcd; \
									\
	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)		\
		edma_write_tcdreg_c(chan, tcd64_r, tcd64_m->__name, __name); \
	else								\
		edma_write_tcdreg_c(chan, tcd_r, tcd_m->__name, __name); \
} while (0)
300
/*
 * Channel-register accessors: the channel registers precede the TCD in
 * struct fsl_edma3_ch_reg, so recover the block from chan->tcd with
 * container_of() and access the named member.
 */
#define edma_readl_chreg(chan, __name)					\
	edma_readl(chan->edma,						\
		   (void __iomem *)&(container_of(((__force void *)chan->tcd),\
						  struct fsl_edma3_ch_reg, tcd)->__name))

#define edma_writel_chreg(chan, val, __name)				\
	edma_writel(chan->edma, val,					\
		    (void __iomem *)&(container_of(((__force void *)chan->tcd),\
						   struct fsl_edma3_ch_reg, tcd)->__name))

/* Access a field of an in-memory TCD, dispatching on the TCD layout. */
#define fsl_edma_get_tcd(_chan, _tcd, _field)				\
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? (((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
	(((struct fsl_edma_hw_tcd *)_tcd)->_field))

/* Convert a little-endian TCD field to CPU order, width-dispatched. */
#define fsl_edma_le_to_cpu(x)						\
_Generic((x),								\
	__le64 : le64_to_cpu((x)),					\
	__le32 : le32_to_cpu((x)),					\
	__le16 : le16_to_cpu((x))					\
)

/* Read a TCD field in CPU byte order, dispatching on the TCD layout. */
#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field)			\
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ?			\
	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))

/* Store a CPU-order value into a TCD field as little-endian. */
#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field)			\
_Generic(((_tcd)->_field),						\
	__le64 : (_tcd)->_field = cpu_to_le64(_val),			\
	__le32 : (_tcd)->_field = cpu_to_le32(_val),			\
	__le16 : (_tcd)->_field = cpu_to_le16(_val)			\
)

#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field)		\
do {									\
	if (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64)		\
		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd64 *)_tcd, _val, _field); \
	else								\
		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \
} while (0)
341
/* Must come after the struct definitions above. */
343#include "fsl-edma-trace.h"
344
345/*
346 * R/W functions for big- or little-endian registers:
347 * The eDMA controller's endian is independent of the CPU core's endian.
348 * For the big-endian IP module, the offset for 8-bit or 16-bit registers
349 * should also be swapped opposite to that in little-endian IP.
350 */
351static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
352{
353 u64 l, h;
354
355 if (edma->big_endian) {
356 l = ioread32be(addr);
357 h = ioread32be(addr + 4);
358 } else {
359 l = ioread32(addr);
360 h = ioread32(addr + 4);
361 }
362
363 trace_edma_readl(edma, addr, l);
364 trace_edma_readl(edma, addr + 4, h);
365
366 return (h << 32) | l;
367}
368
369static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
370{
371 u32 val;
372
373 if (edma->big_endian)
374 val = ioread32be(addr);
375 else
376 val = ioread32(addr);
377
378 trace_edma_readl(edma, addr, val);
379
380 return val;
381}
382
383static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
384{
385 u16 val;
386
387 if (edma->big_endian)
388 val = ioread16be(addr);
389 else
390 val = ioread16(addr);
391
392 trace_edma_readw(edma, addr, val);
393
394 return val;
395}
396
397static inline void edma_writeb(struct fsl_edma_engine *edma,
398 u8 val, void __iomem *addr)
399{
400 /* swap the reg offset for these in big-endian mode */
401 if (edma->big_endian)
402 iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
403 else
404 iowrite8(val, addr);
405
406 trace_edma_writeb(edma, addr, val);
407}
408
409static inline void edma_writew(struct fsl_edma_engine *edma,
410 u16 val, void __iomem *addr)
411{
412 /* swap the reg offset for these in big-endian mode */
413 if (edma->big_endian)
414 iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
415 else
416 iowrite16(val, addr);
417
418 trace_edma_writew(edma, addr, val);
419}
420
421static inline void edma_writel(struct fsl_edma_engine *edma,
422 u32 val, void __iomem *addr)
423{
424 if (edma->big_endian)
425 iowrite32be(val, addr);
426 else
427 iowrite32(val, addr);
428
429 trace_edma_writel(edma, addr, val);
430}
431
432static inline void edma_writeq(struct fsl_edma_engine *edma,
433 u64 val, void __iomem *addr)
434{
435 if (edma->big_endian) {
436 iowrite32be(val & 0xFFFFFFFF, addr);
437 iowrite32be(val >> 32, addr + 4);
438 } else {
439 iowrite32(val & 0xFFFFFFFF, addr);
440 iowrite32(val >> 32, addr + 4);
441 }
442
443 trace_edma_writel(edma, addr, val & 0xFFFFFFFF);
444 trace_edma_writel(edma, addr + 4, val >> 32);
445}
446
/* Convert a dmaengine channel into its containing fsl_edma_chan. */
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
451
/* Convert a virt-dma descriptor into its containing fsl_edma_desc. */
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}
456
/* Error-interrupt path: mark the channel as failed. */
static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	fsl_chan->status = DMA_ERROR;
}
461
462void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
463void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
464void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
465 unsigned int slot, bool enable);
466void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
467int fsl_edma_terminate_all(struct dma_chan *chan);
468int fsl_edma_pause(struct dma_chan *chan);
469int fsl_edma_resume(struct dma_chan *chan);
470int fsl_edma_slave_config(struct dma_chan *chan,
471 struct dma_slave_config *cfg);
472enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
473 dma_cookie_t cookie, struct dma_tx_state *txstate);
474struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
475 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
476 size_t period_len, enum dma_transfer_direction direction,
477 unsigned long flags);
478struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
479 struct dma_chan *chan, struct scatterlist *sgl,
480 unsigned int sg_len, enum dma_transfer_direction direction,
481 unsigned long flags, void *context);
482struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(
483 struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
484 size_t len, unsigned long flags);
485void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
486void fsl_edma_issue_pending(struct dma_chan *chan);
487int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
488void fsl_edma_free_chan_resources(struct dma_chan *chan);
489void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
490void fsl_edma_setup_regs(struct fsl_edma_engine *edma);
491
492#endif /* _FSL_EDMA_COMMON_H_ */