drivers/dma/fsl-edma-common.c (Linux v6.8)
  1// SPDX-License-Identifier: GPL-2.0+
  2//
  3// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
  4// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>
  5
  6#include <linux/dmapool.h>
  7#include <linux/module.h>
  8#include <linux/slab.h>
  9#include <linux/dma-mapping.h>
 10#include <linux/pm_runtime.h>
 11#include <linux/pm_domain.h>
 12
 13#include "fsl-edma-common.h"
 14
 15#define EDMA_CR			0x00
 16#define EDMA_ES			0x04
 17#define EDMA_ERQ		0x0C
 18#define EDMA_EEI		0x14
 19#define EDMA_SERQ		0x1B
 20#define EDMA_CERQ		0x1A
 21#define EDMA_SEEI		0x19
 22#define EDMA_CEEI		0x18
 23#define EDMA_CINT		0x1F
 24#define EDMA_CERR		0x1E
 25#define EDMA_SSRT		0x1D
 26#define EDMA_CDNE		0x1C
 27#define EDMA_INTR		0x24
 28#define EDMA_ERR		0x2C
 29
 30#define EDMA64_ERQH		0x08
 31#define EDMA64_EEIH		0x10
 32#define EDMA64_SERQ		0x18
 33#define EDMA64_CERQ		0x19
 34#define EDMA64_SEEI		0x1a
 35#define EDMA64_CEEI		0x1b
 36#define EDMA64_CINT		0x1c
 37#define EDMA64_CERR		0x1d
 38#define EDMA64_SSRT		0x1e
 39#define EDMA64_CDNE		0x1f
 40#define EDMA64_INTH		0x20
 41#define EDMA64_INTL		0x24
 42#define EDMA64_ERRH		0x28
 43#define EDMA64_ERRL		0x2c
 44
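/*
 * A worked note on the two register layouts above: the 32-channel
 * block keeps its byte-wide set/clear registers in big-endian byte
 * order, so SERQ sits at 0x1B where the 64-channel "EDMA64" layout
 * has it at 0x18 (CERQ 0x1A vs 0x19, SEEI 0x19 vs 0x1a, CEEI 0x18 vs
 * 0x1b, and so on); EDMA64 also adds the ERQH/EEIH/INTH/ERRH
 * high-word registers for channels 32..63. fsl_edma_setup_regs() at
 * the bottom of this file selects one of the two sets.
 */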
 45void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
 46{
 47	spin_lock(&fsl_chan->vchan.lock);
 48
 49	if (!fsl_chan->edesc) {
 50		/* terminate_all called before */
 51		spin_unlock(&fsl_chan->vchan.lock);
 52		return;
 53	}
 54
 55	if (!fsl_chan->edesc->iscyclic) {
 56		list_del(&fsl_chan->edesc->vdesc.node);
 57		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
 58		fsl_chan->edesc = NULL;
 59		fsl_chan->status = DMA_COMPLETE;
 60		fsl_chan->idle = true;
 61	} else {
 62		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
 63	}
 64
 65	if (!fsl_chan->edesc)
 66		fsl_edma_xfer_desc(fsl_chan);
 67
 68	spin_unlock(&fsl_chan->vchan.lock);
 69}
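/*
 * Note on the completion path above: the controller's IRQ handler is
 * expected to call this for each channel whose interrupt bit is set.
 * A non-cyclic descriptor is completed and its cookie reported, then
 * the next queued descriptor (if any) is started through
 * fsl_edma_xfer_desc(); a cyclic descriptor only fires its period
 * callback and keeps running.
 */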
 70
 71static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
 72{
 73	u32 val, flags;
 74
 75	flags = fsl_edma_drvflags(fsl_chan);
 76	val = edma_readl_chreg(fsl_chan, ch_sbr);
 77	/* Remote/local swapped wrongly on iMX8 QM Audio edma */
 78	if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
 79		if (!fsl_chan->is_rxchan)
 80			val |= EDMA_V3_CH_SBR_RD;
 81		else
 82			val |= EDMA_V3_CH_SBR_WR;
 83	} else {
 84		if (fsl_chan->is_rxchan)
 85			val |= EDMA_V3_CH_SBR_RD;
 86		else
 87			val |= EDMA_V3_CH_SBR_WR;
 88	}
 89
 90	if (fsl_chan->is_remote)
 91		val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
 92
 93	edma_writel_chreg(fsl_chan, val, ch_sbr);
 94
 95	if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
 96		/*
 97		 * ch_mux: with the exception of 0, writing a value already
 98		 * in use on another channel is forced to 0 by the hardware.
 99		 */
100		if (!edma_readl_chreg(fsl_chan, ch_mux))
101			edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
102	}
103
104	val = edma_readl_chreg(fsl_chan, ch_csr);
105	val |= EDMA_V3_CH_CSR_ERQ;
106	edma_writel_chreg(fsl_chan, val, ch_csr);
107}
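/*
 * Summary of the CH_SBR setup above: an RX channel normally sets the
 * read attribute (the engine fetches from the peripheral) and a TX
 * channel the write attribute; the i.MX8QM audio eDMA quirk inverts
 * that pairing, and a remote channel clears both bits, presumably
 * leaving the bus attributes to the remote side.
 */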
108
109static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
110{
111	struct edma_regs *regs = &fsl_chan->edma->regs;
112	u32 ch = fsl_chan->vchan.chan.chan_id;
113
114	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
115		return fsl_edma3_enable_request(fsl_chan);
116
117	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
118		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
119		edma_writeb(fsl_chan->edma, ch, regs->serq);
120	} else {
121		/* ColdFire is big endian, and accesses natively
122		 * big endian I/O peripherals
123		 */
124		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
125		iowrite8(ch, regs->serq);
126	}
127}
128
129static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
130{
131	u32 val = edma_readl_chreg(fsl_chan, ch_csr);
132	u32 flags;
133
134	flags = fsl_edma_drvflags(fsl_chan);
135
136	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
137		edma_writel_chreg(fsl_chan, 0, ch_mux);
138
139	val &= ~EDMA_V3_CH_CSR_ERQ;
140	edma_writel_chreg(fsl_chan, val, ch_csr);
141}
142
143void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
144{
145	struct edma_regs *regs = &fsl_chan->edma->regs;
146	u32 ch = fsl_chan->vchan.chan.chan_id;
147
148	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
149		return fsl_edma3_disable_request(fsl_chan);
150
151	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
152		edma_writeb(fsl_chan->edma, ch, regs->cerq);
153		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
154	} else {
155		/* ColdFire is big endian, and accesses natively
156		 * big endian I/O peripherals
157		 */
158		iowrite8(ch, regs->cerq);
159		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
160	}
161}
162
163static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
164			   u32 off, u32 slot, bool enable)
165{
166	u8 val8;
167
168	if (enable)
169		val8 = EDMAMUX_CHCFG_ENBL | slot;
170	else
171		val8 = EDMAMUX_CHCFG_DIS;
172
173	iowrite8(val8, addr + off);
174}
175
176static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
177			    u32 off, u32 slot, bool enable)
178{
179	u32 val;
180
181	if (enable)
182		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
183	else
184		val = EDMAMUX_CHCFG_DIS;
185
186	iowrite32(val, addr + off * 4);
187}
188
189void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
190		       unsigned int slot, bool enable)
191{
192	u32 ch = fsl_chan->vchan.chan.chan_id;
193	void __iomem *muxaddr;
194	unsigned int chans_per_mux, ch_off;
195	int endian_diff[4] = {3, 1, -1, -3};
196	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
197
198	if (!dmamux_nr)
199		return;
200
201	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
202	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
203
204	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
205		ch_off += endian_diff[ch_off % 4];
206
207	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
208	slot = EDMAMUX_CHCFG_SOURCE(slot);
209
210	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
211		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
212	else
213		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
214}
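/*
 * Worked example for the routing above: with n_chans = 32 and two DMA
 * muxes, chans_per_mux = 16, so channel 20 lands on muxbase[1] with
 * ch_off = 4. When FSL_EDMA_DRV_MUX_SWAP is set, the endian_diff[]
 * table remaps each group of four offsets (4..7 becomes 7..4) to undo
 * the byte-lane swap of the big-endian mux registers.
 */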
215
216static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
217{
218	u32 val;
219
220	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
221		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
222
223	val = ffs(addr_width) - 1;
224	return val | (val << 8);
225}
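/*
 * Example of the encoding above: DMA_SLAVE_BUSWIDTH_4_BYTES gives
 * val = ffs(4) - 1 = 2, so the function returns 0x0202, i.e. the TCD
 * ATTR SSIZE and DSIZE fields both set to 2 (32-bit transfers). An
 * undefined width falls back to the same 32-bit encoding.
 */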
226
227void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
228{
229	struct fsl_edma_desc *fsl_desc;
230	int i;
231
232	fsl_desc = to_fsl_edma_desc(vdesc);
233	for (i = 0; i < fsl_desc->n_tcds; i++)
234		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
235			      fsl_desc->tcd[i].ptcd);
236	kfree(fsl_desc);
237}
238
239int fsl_edma_terminate_all(struct dma_chan *chan)
240{
241	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
242	unsigned long flags;
243	LIST_HEAD(head);
244
245	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
246	fsl_edma_disable_request(fsl_chan);
247	fsl_chan->edesc = NULL;
248	fsl_chan->idle = true;
249	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
250	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
251	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
252
253	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
254		pm_runtime_allow(fsl_chan->pd_dev);
255
256	return 0;
257}
258
259int fsl_edma_pause(struct dma_chan *chan)
260{
261	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
262	unsigned long flags;
263
264	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
265	if (fsl_chan->edesc) {
266		fsl_edma_disable_request(fsl_chan);
267		fsl_chan->status = DMA_PAUSED;
268		fsl_chan->idle = true;
269	}
270	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
271	return 0;
272}
273
274int fsl_edma_resume(struct dma_chan *chan)
275{
276	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
277	unsigned long flags;
278
279	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
280	if (fsl_chan->edesc) {
281		fsl_edma_enable_request(fsl_chan);
282		fsl_chan->status = DMA_IN_PROGRESS;
283		fsl_chan->idle = false;
284	}
285	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
286	return 0;
287}
288
289static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
290{
291	if (fsl_chan->dma_dir != DMA_NONE)
292		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
293				   fsl_chan->dma_dev_addr,
294				   fsl_chan->dma_dev_size,
295				   fsl_chan->dma_dir, 0);
296	fsl_chan->dma_dir = DMA_NONE;
297}
298
299static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
300				    enum dma_transfer_direction dir)
301{
302	struct device *dev = fsl_chan->vchan.chan.device->dev;
303	enum dma_data_direction dma_dir;
304	phys_addr_t addr = 0;
305	u32 size = 0;
306
307	switch (dir) {
308	case DMA_MEM_TO_DEV:
309		dma_dir = DMA_FROM_DEVICE;
310		addr = fsl_chan->cfg.dst_addr;
311		size = fsl_chan->cfg.dst_maxburst;
312		break;
313	case DMA_DEV_TO_MEM:
314		dma_dir = DMA_TO_DEVICE;
315		addr = fsl_chan->cfg.src_addr;
316		size = fsl_chan->cfg.src_maxburst;
317		break;
318	default:
319		dma_dir = DMA_NONE;
320		break;
321	}
322
323	/* Already mapped for this config? */
324	if (fsl_chan->dma_dir == dma_dir)
325		return true;
326
327	fsl_edma_unprep_slave_dma(fsl_chan);
328
329	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
330	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
331		return false;
332	fsl_chan->dma_dev_size = size;
333	fsl_chan->dma_dir = dma_dir;
334
335	return true;
336}
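/*
 * Note on the mapping above: the device FIFO is mapped with
 * dma_map_resource(), and, perhaps counter-intuitively, DMA_MEM_TO_DEV
 * uses DMA_FROM_DEVICE (and DMA_DEV_TO_MEM uses DMA_TO_DEVICE): the
 * direction describes the mapped MMIO resource rather than the memory
 * buffer. The mapping is cached in the channel and redone only when
 * the direction changes.
 */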
337
338int fsl_edma_slave_config(struct dma_chan *chan,
339				 struct dma_slave_config *cfg)
340{
341	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
342
343	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
344	fsl_edma_unprep_slave_dma(fsl_chan);
345
346	return 0;
347}
348
349static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
350		struct virt_dma_desc *vdesc, bool in_progress)
351{
352	struct fsl_edma_desc *edesc = fsl_chan->edesc;
353	enum dma_transfer_direction dir = edesc->dirn;
354	dma_addr_t cur_addr, dma_addr;
355	size_t len, size;
356	u32 nbytes = 0;
357	int i;
358
359	/* calculate the total size in this desc */
360	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
361		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
362		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
363			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
364		len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
365	}
366
367	if (!in_progress)
368		return len;
369
370	if (dir == DMA_MEM_TO_DEV)
371		cur_addr = edma_read_tcdreg(fsl_chan, saddr);
372	else
373		cur_addr = edma_read_tcdreg(fsl_chan, daddr);
374
375	/* find the in-flight TCD and compute the remaining residue */
376	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
377		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
378		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
379			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
380
381		size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
382
383		if (dir == DMA_MEM_TO_DEV)
384			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
385		else
386			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
387
388		len -= size;
389		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
390			len += dma_addr + size - cur_addr;
391			break;
392		}
393	}
394
395	return len;
396}
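/*
 * Worked residue example for the function above: two TCDs with
 * nbytes = 32 and biter = 128 give len = 2 * 4096 = 8192 bytes. If
 * the engine is currently inside TCD 1 with cur_addr 1024 bytes past
 * that TCD's start, the loop subtracts 4096 for TCD 0, subtracts
 * another 4096 for TCD 1, then adds back 4096 - 1024, so 3072 bytes
 * remain outstanding.
 */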
397
398enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
399		dma_cookie_t cookie, struct dma_tx_state *txstate)
400{
401	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
402	struct virt_dma_desc *vdesc;
403	enum dma_status status;
404	unsigned long flags;
405
406	status = dma_cookie_status(chan, cookie, txstate);
407	if (status == DMA_COMPLETE)
408		return status;
409
410	if (!txstate)
411		return fsl_chan->status;
412
413	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
414	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
415	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
416		txstate->residue =
417			fsl_edma_desc_residue(fsl_chan, vdesc, true);
418	else if (vdesc)
419		txstate->residue =
420			fsl_edma_desc_residue(fsl_chan, vdesc, false);
421	else
422		txstate->residue = 0;
423
424	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
425
426	return fsl_chan->status;
427}
428
429static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
430				  struct fsl_edma_hw_tcd *tcd)
431{
432	u16 csr = 0;
433
434	/*
435	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little-
436	 * endian format. However, the TCD registers must be loaded in
437	 * big- or little-endian order, matching the endianness of the
438	 * eDMA engine model; the edma_write functions handle the swap.
439	 */
440	edma_write_tcdreg(fsl_chan, 0, csr);
441
442	edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
443	edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
444
445	edma_write_tcdreg(fsl_chan, tcd->attr, attr);
446	edma_write_tcdreg(fsl_chan, tcd->soff, soff);
447
448	edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
449	edma_write_tcdreg(fsl_chan, tcd->slast, slast);
450
451	edma_write_tcdreg(fsl_chan, tcd->citer, citer);
452	edma_write_tcdreg(fsl_chan, tcd->biter, biter);
453	edma_write_tcdreg(fsl_chan, tcd->doff, doff);
454
455	edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
456
457	csr = le16_to_cpu(tcd->csr);
458
459	if (fsl_chan->is_sw) {
460		csr |= EDMA_TCD_CSR_START;
461		tcd->csr = cpu_to_le16(csr);
462	}
463
464	/*
465	 * On eDMAv3 the CHn_CSR[DONE] bit must be cleared before enabling
466	 * TCDn_CSR[ESG]; eDMAv4 has no such requirement. Changing MLINK
467	 * requires clearing CHn_CSR[DONE] on both eDMAv3 and eDMAv4.
468	 */
469	if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
470		(csr & EDMA_TCD_CSR_E_SG)) ||
471	    ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
472		(csr & EDMA_TCD_CSR_E_LINK)))
473		edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
474
475
476	edma_write_tcdreg(fsl_chan, tcd->csr, csr);
477}
478
479static inline
480void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
481		       struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
482		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
483		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
484		       bool disable_req, bool enable_sg)
485{
486	struct dma_slave_config *cfg = &fsl_chan->cfg;
487	u16 csr = 0;
488	u32 burst;
489
490	/*
491	 * eDMA hardware SGs require the TCDs to be stored in little-
492	 * endian format irrespective of the register endian model, so
493	 * the values are kept little endian in memory and
494	 * fsl_edma_set_tcd_regs performs the swap when loading them.
495	 */
496	tcd->saddr = cpu_to_le32(src);
497	tcd->daddr = cpu_to_le32(dst);
498
499	tcd->attr = cpu_to_le16(attr);
500
501	tcd->soff = cpu_to_le16(soff);
502
503	if (fsl_chan->is_multi_fifo) {
504		/* set mloff to support multiple fifo */
505		burst = cfg->direction == DMA_DEV_TO_MEM ?
506				cfg->src_maxburst : cfg->dst_maxburst;
507		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
508		/* enable DMLOE/SMLOE */
509		if (cfg->direction == DMA_MEM_TO_DEV) {
510			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
511			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
512		} else {
513			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
514			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
515		}
516	}
517
518	tcd->nbytes = cpu_to_le32(nbytes);
519	tcd->slast = cpu_to_le32(slast);
520
521	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
522	tcd->doff = cpu_to_le16(doff);
523
524	tcd->dlast_sga = cpu_to_le32(dlast_sga);
525
526	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
527	if (major_int)
528		csr |= EDMA_TCD_CSR_INT_MAJOR;
529
530	if (disable_req)
531		csr |= EDMA_TCD_CSR_D_REQ;
532
533	if (enable_sg)
534		csr |= EDMA_TCD_CSR_E_SG;
535
536	if (fsl_chan->is_rxchan)
537		csr |= EDMA_TCD_CSR_ACTIVE;
538
539	if (fsl_chan->is_sw)
540		csr |= EDMA_TCD_CSR_START;
541
542	tcd->csr = cpu_to_le16(csr);
543}
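/*
 * Example of the minor-loop fields above: with a 4-byte bus width and
 * maxburst = 8 the callers pass nbytes = 32, and a multi-FIFO channel
 * additionally gets MLOFF = -(8 * 4) so that the engine steps back to
 * the first FIFO after each 32-byte minor loop; citer/biter then
 * count the minor loops making up the major loop.
 */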
544
545static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
546		int sg_len)
547{
548	struct fsl_edma_desc *fsl_desc;
549	int i;
550
551	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
552	if (!fsl_desc)
553		return NULL;
554
555	fsl_desc->echan = fsl_chan;
556	fsl_desc->n_tcds = sg_len;
557	for (i = 0; i < sg_len; i++) {
558		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
559					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
560		if (!fsl_desc->tcd[i].vtcd)
561			goto err;
562	}
563	return fsl_desc;
564
565err:
566	while (--i >= 0)
567		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
568				fsl_desc->tcd[i].ptcd);
569	kfree(fsl_desc);
570	return NULL;
571}
572
573struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
574		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
575		size_t period_len, enum dma_transfer_direction direction,
576		unsigned long flags)
577{
578	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
579	struct fsl_edma_desc *fsl_desc;
580	dma_addr_t dma_buf_next;
581	bool major_int = true;
582	int sg_len, i;
583	u32 src_addr, dst_addr, last_sg, nbytes;
584	u16 soff, doff, iter;
585
586	if (!is_slave_direction(direction))
587		return NULL;
588
589	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
590		return NULL;
591
592	sg_len = buf_len / period_len;
593	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
594	if (!fsl_desc)
595		return NULL;
596	fsl_desc->iscyclic = true;
597	fsl_desc->dirn = direction;
598
599	dma_buf_next = dma_addr;
600	if (direction == DMA_MEM_TO_DEV) {
601		fsl_chan->attr =
602			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
603		nbytes = fsl_chan->cfg.dst_addr_width *
604			fsl_chan->cfg.dst_maxburst;
605	} else {
606		fsl_chan->attr =
607			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
608		nbytes = fsl_chan->cfg.src_addr_width *
609			fsl_chan->cfg.src_maxburst;
610	}
611
612	iter = period_len / nbytes;
613
614	for (i = 0; i < sg_len; i++) {
615		if (dma_buf_next >= dma_addr + buf_len)
616			dma_buf_next = dma_addr;
617
618		/* get next sg's physical address */
619		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
620
621		if (direction == DMA_MEM_TO_DEV) {
622			src_addr = dma_buf_next;
623			dst_addr = fsl_chan->dma_dev_addr;
624			soff = fsl_chan->cfg.dst_addr_width;
625			doff = fsl_chan->is_multi_fifo ? 4 : 0;
626		} else if (direction == DMA_DEV_TO_MEM) {
627			src_addr = fsl_chan->dma_dev_addr;
628			dst_addr = dma_buf_next;
629			soff = fsl_chan->is_multi_fifo ? 4 : 0;
630			doff = fsl_chan->cfg.src_addr_width;
631		} else {
632			/* DMA_DEV_TO_DEV */
633			src_addr = fsl_chan->cfg.src_addr;
634			dst_addr = fsl_chan->cfg.dst_addr;
635			soff = doff = 0;
636			major_int = false;
637		}
638
639		fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
640				  fsl_chan->attr, soff, nbytes, 0, iter,
641				  iter, doff, last_sg, major_int, false, true);
642		dma_buf_next += period_len;
643	}
644
645	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
646}
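/*
 * Illustrative client-side sketch (not part of this file): how a
 * dmaengine consumer might drive the cyclic path above, e.g. for an
 * audio FIFO. The function name, addresses and sizes are
 * hypothetical; error handling is trimmed to the essentials.
 */
static int example_start_cyclic_rx(struct dma_chan *chan,
				   dma_addr_t buf, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,	/* nbytes = 4 * 8 = 32 */
	};
	struct dma_async_tx_descriptor *desc;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	/* four 4 KiB periods: iter = 4096 / 32 = 128 per period */
	desc = dmaengine_prep_dma_cyclic(chan, buf, 4 * SZ_4K, SZ_4K,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}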
647
648struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
649		struct dma_chan *chan, struct scatterlist *sgl,
650		unsigned int sg_len, enum dma_transfer_direction direction,
651		unsigned long flags, void *context)
652{
653	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
654	struct fsl_edma_desc *fsl_desc;
655	struct scatterlist *sg;
656	u32 src_addr, dst_addr, last_sg, nbytes;
657	u16 soff, doff, iter;
658	int i;
659
660	if (!is_slave_direction(direction))
661		return NULL;
662
663	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
664		return NULL;
665
666	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
667	if (!fsl_desc)
668		return NULL;
669	fsl_desc->iscyclic = false;
670	fsl_desc->dirn = direction;
671
672	if (direction == DMA_MEM_TO_DEV) {
673		fsl_chan->attr =
674			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
675		nbytes = fsl_chan->cfg.dst_addr_width *
676			fsl_chan->cfg.dst_maxburst;
677	} else {
678		fsl_chan->attr =
679			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
680		nbytes = fsl_chan->cfg.src_addr_width *
681			fsl_chan->cfg.src_maxburst;
682	}
683
684	for_each_sg(sgl, sg, sg_len, i) {
685		if (direction == DMA_MEM_TO_DEV) {
686			src_addr = sg_dma_address(sg);
687			dst_addr = fsl_chan->dma_dev_addr;
688			soff = fsl_chan->cfg.dst_addr_width;
689			doff = 0;
690		} else if (direction == DMA_DEV_TO_MEM) {
691			src_addr = fsl_chan->dma_dev_addr;
692			dst_addr = sg_dma_address(sg);
693			soff = 0;
694			doff = fsl_chan->cfg.src_addr_width;
695		} else {
696			/* DMA_DEV_TO_DEV */
697			src_addr = fsl_chan->cfg.src_addr;
698			dst_addr = fsl_chan->cfg.dst_addr;
699			soff = 0;
700			doff = 0;
701		}
702
703		/*
704		 * If sg_dma_len is not a multiple of the burst length,
705		 * choose a smaller burst so that the whole transfer
706		 * length is a multiple of the minor loop (burst length).
707		 */
708		if (sg_dma_len(sg) % nbytes) {
709			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
710			u32 burst = (direction == DMA_DEV_TO_MEM) ?
711						fsl_chan->cfg.src_maxburst :
712						fsl_chan->cfg.dst_maxburst;
713			int j;
714
715			for (j = burst; j > 1; j--) {
716				if (!(sg_dma_len(sg) % (j * width))) {
717					nbytes = j * width;
718					break;
719				}
720			}
721			/* Fall back to a burst size of 1 if none fits */
722			if (j == 1)
723				nbytes = width;
724		}
725		iter = sg_dma_len(sg) / nbytes;
726		if (i < sg_len - 1) {
727			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
728			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
729					  dst_addr, fsl_chan->attr, soff,
730					  nbytes, 0, iter, iter, doff, last_sg,
731					  false, false, true);
732		} else {
733			last_sg = 0;
734			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
735					  dst_addr, fsl_chan->attr, soff,
736					  nbytes, 0, iter, iter, doff, last_sg,
737					  true, true, false);
738		}
739	}
740
741	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
742}
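/*
 * Worked example of the burst fixup above: with a 4-byte width,
 * maxburst = 8 (nbytes = 32) and sg_dma_len = 100, 100 is not a
 * multiple of 32, so j counts down from 8 until j = 5 satisfies
 * 100 % (5 * 4) == 0; nbytes becomes 20 and iter = 100 / 20 = 5.
 * If nothing divides evenly, nbytes falls back to a single width.
 */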
743
744struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
745						     dma_addr_t dma_dst, dma_addr_t dma_src,
746						     size_t len, unsigned long flags)
747{
748	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
749	struct fsl_edma_desc *fsl_desc;
750
751	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
752	if (!fsl_desc)
753		return NULL;
754	fsl_desc->iscyclic = false;
755
756	fsl_chan->is_sw = true;
757
758	/* len matches copy_align and max_seg_size, so one TCD is enough */
759	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
760			fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
761			32, len, 0, 1, 1, 32, 0, true, true, false);
762
763	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
764}
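/*
 * Note on the single-TCD memcpy above: with a 32-byte bus width,
 * soff = doff = 32 and biter = citer = 1, one major iteration of
 * nbytes = len moves the whole buffer, which is why the driver's
 * advertised copy_align and max_seg_size must keep len aligned and
 * small enough for a single minor loop.
 */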
765
766void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
767{
768	struct virt_dma_desc *vdesc;
769
770	lockdep_assert_held(&fsl_chan->vchan.lock);
771
772	vdesc = vchan_next_desc(&fsl_chan->vchan);
773	if (!vdesc)
774		return;
775	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
776	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
777	fsl_edma_enable_request(fsl_chan);
778	fsl_chan->status = DMA_IN_PROGRESS;
779	fsl_chan->idle = false;
780}
781
782void fsl_edma_issue_pending(struct dma_chan *chan)
783{
784	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
785	unsigned long flags;
786
787	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
788
789	if (unlikely(fsl_chan->pm_state != RUNNING)) {
790		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
791		/* cannot submit due to suspend */
792		return;
793	}
794
795	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
796		fsl_edma_xfer_desc(fsl_chan);
797
798	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
799}
800
801int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
802{
803	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
804
805	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
806				sizeof(struct fsl_edma_hw_tcd),
807				32, 0);
808	return 0;
809}
810
811void fsl_edma_free_chan_resources(struct dma_chan *chan)
812{
813	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
814	struct fsl_edma_engine *edma = fsl_chan->edma;
815	unsigned long flags;
816	LIST_HEAD(head);
817
818	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
819	fsl_edma_disable_request(fsl_chan);
820	if (edma->drvdata->dmamuxs)
821		fsl_edma_chan_mux(fsl_chan, 0, false);
822	fsl_chan->edesc = NULL;
823	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
824	fsl_edma_unprep_slave_dma(fsl_chan);
825	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
826
827	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
828	dma_pool_destroy(fsl_chan->tcd_pool);
829	fsl_chan->tcd_pool = NULL;
830	fsl_chan->is_sw = false;
831	fsl_chan->srcid = 0;
832}
833
834void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
835{
836	struct fsl_edma_chan *chan, *_chan;
837
838	list_for_each_entry_safe(chan, _chan,
839				&dmadev->channels, vchan.chan.device_node) {
840		list_del(&chan->vchan.chan.device_node);
841		tasklet_kill(&chan->vchan.task);
842	}
843}
844
845/*
846 * On the 32-channel Vybrid/mpc577x eDMA version, the register offsets
847 * differ from those of the 64-channel ColdFire mcf5441x eDMA.
848 *
849 * This function sets up the register offsets for the declared version,
850 * so it must be called from xxx_edma_probe() just after setting the
851 * edma "version" and "membase" appropriately.
852 */
853void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
854{
855	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);
856
857	edma->regs.cr = edma->membase + EDMA_CR;
858	edma->regs.es = edma->membase + EDMA_ES;
859	edma->regs.erql = edma->membase + EDMA_ERQ;
860	edma->regs.eeil = edma->membase + EDMA_EEI;
861
862	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
863	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
864	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
865	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
866	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
867	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
868	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
869	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
870	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
871	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);
872
873	if (is64) {
874		edma->regs.erqh = edma->membase + EDMA64_ERQH;
875		edma->regs.eeih = edma->membase + EDMA64_EEIH;
876		edma->regs.errh = edma->membase + EDMA64_ERRH;
877		edma->regs.inth = edma->membase + EDMA64_INTH;
878	}
879}
880
881MODULE_LICENSE("GPL v2");

drivers/dma/fsl-edma-common.c (Linux v5.4)
  1// SPDX-License-Identifier: GPL-2.0+
  2//
  3// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
  4// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>
  5
  6#include <linux/dmapool.h>
  7#include <linux/module.h>
  8#include <linux/slab.h>
  9#include <linux/dma-mapping.h>
 10
 11#include "fsl-edma-common.h"
 12
 13#define EDMA_CR			0x00
 14#define EDMA_ES			0x04
 15#define EDMA_ERQ		0x0C
 16#define EDMA_EEI		0x14
 17#define EDMA_SERQ		0x1B
 18#define EDMA_CERQ		0x1A
 19#define EDMA_SEEI		0x19
 20#define EDMA_CEEI		0x18
 21#define EDMA_CINT		0x1F
 22#define EDMA_CERR		0x1E
 23#define EDMA_SSRT		0x1D
 24#define EDMA_CDNE		0x1C
 25#define EDMA_INTR		0x24
 26#define EDMA_ERR		0x2C
 27
 28#define EDMA64_ERQH		0x08
 29#define EDMA64_EEIH		0x10
 30#define EDMA64_SERQ		0x18
 31#define EDMA64_CERQ		0x19
 32#define EDMA64_SEEI		0x1a
 33#define EDMA64_CEEI		0x1b
 34#define EDMA64_CINT		0x1c
 35#define EDMA64_CERR		0x1d
 36#define EDMA64_SSRT		0x1e
 37#define EDMA64_CDNE		0x1f
 38#define EDMA64_INTH		0x20
 39#define EDMA64_INTL		0x24
 40#define EDMA64_ERRH		0x28
 41#define EDMA64_ERRL		0x2c
 42
 43#define EDMA_TCD		0x1000
 44
 45static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
 46{
 47	struct edma_regs *regs = &fsl_chan->edma->regs;
 48	u32 ch = fsl_chan->vchan.chan.chan_id;
 49
 50	if (fsl_chan->edma->drvdata->version == v1) {
 51		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
 52		edma_writeb(fsl_chan->edma, ch, regs->serq);
 53	} else {
 54		/* ColdFire is big endian, and accesses natively
 55		 * big endian I/O peripherals
 56		 */
 57		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
 58		iowrite8(ch, regs->serq);
 59	}
 60}
 61
 62void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
 63{
 64	struct edma_regs *regs = &fsl_chan->edma->regs;
 65	u32 ch = fsl_chan->vchan.chan.chan_id;
 66
 67	if (fsl_chan->edma->drvdata->version == v1) {
 68		edma_writeb(fsl_chan->edma, ch, regs->cerq);
 69		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
 70	} else {
 71		/* ColdFire is big endian, and accesses natively
 72		 * big endian I/O peripherals
 73		 */
 74		iowrite8(ch, regs->cerq);
 75		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
 76	}
 77}
 78EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
 79
 80static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
 81			   u32 off, u32 slot, bool enable)
 82{
 83	u8 val8;
 84
 85	if (enable)
 86		val8 = EDMAMUX_CHCFG_ENBL | slot;
 87	else
 88		val8 = EDMAMUX_CHCFG_DIS;
 89
 90	iowrite8(val8, addr + off);
 91}
 92
 93static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
 94			    u32 off, u32 slot, bool enable)
 95{
 96	u32 val;
 97
 98	if (enable)
 99		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
100	else
101		val = EDMAMUX_CHCFG_DIS;
102
103	iowrite32(val, addr + off * 4);
104}
105
106void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
107		       unsigned int slot, bool enable)
108{
109	u32 ch = fsl_chan->vchan.chan.chan_id;
110	void __iomem *muxaddr;
111	unsigned int chans_per_mux, ch_off;
112	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
113
114	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
115	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
116	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
117	slot = EDMAMUX_CHCFG_SOURCE(slot);
118
119	if (fsl_chan->edma->drvdata->version == v3)
120		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
121	else
122		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
123}
124EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
125
126static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
127{
128	switch (addr_width) {
129	case 1:
130		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
131	case 2:
132		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
133	case 4:
134		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
135	case 8:
136		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
137	default:
138		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
139	}
140}
141
142void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
143{
144	struct fsl_edma_desc *fsl_desc;
145	int i;
146
147	fsl_desc = to_fsl_edma_desc(vdesc);
148	for (i = 0; i < fsl_desc->n_tcds; i++)
149		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
150			      fsl_desc->tcd[i].ptcd);
151	kfree(fsl_desc);
152}
153EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
154
155int fsl_edma_terminate_all(struct dma_chan *chan)
156{
157	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
158	unsigned long flags;
159	LIST_HEAD(head);
160
161	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
162	fsl_edma_disable_request(fsl_chan);
163	fsl_chan->edesc = NULL;
164	fsl_chan->idle = true;
165	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
166	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
167	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
168	return 0;
169}
170EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
171
172int fsl_edma_pause(struct dma_chan *chan)
173{
174	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
175	unsigned long flags;
176
177	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
178	if (fsl_chan->edesc) {
179		fsl_edma_disable_request(fsl_chan);
180		fsl_chan->status = DMA_PAUSED;
181		fsl_chan->idle = true;
182	}
183	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
184	return 0;
185}
186EXPORT_SYMBOL_GPL(fsl_edma_pause);
187
188int fsl_edma_resume(struct dma_chan *chan)
189{
190	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
191	unsigned long flags;
192
193	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
194	if (fsl_chan->edesc) {
195		fsl_edma_enable_request(fsl_chan);
196		fsl_chan->status = DMA_IN_PROGRESS;
197		fsl_chan->idle = false;
198	}
199	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
200	return 0;
201}
202EXPORT_SYMBOL_GPL(fsl_edma_resume);
203
204static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
205{
206	if (fsl_chan->dma_dir != DMA_NONE)
207		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
208				   fsl_chan->dma_dev_addr,
209				   fsl_chan->dma_dev_size,
210				   fsl_chan->dma_dir, 0);
211	fsl_chan->dma_dir = DMA_NONE;
212}
213
214static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
215				    enum dma_transfer_direction dir)
216{
217	struct device *dev = fsl_chan->vchan.chan.device->dev;
218	enum dma_data_direction dma_dir;
219	phys_addr_t addr = 0;
220	u32 size = 0;
221
222	switch (dir) {
223	case DMA_MEM_TO_DEV:
224		dma_dir = DMA_FROM_DEVICE;
225		addr = fsl_chan->cfg.dst_addr;
226		size = fsl_chan->cfg.dst_maxburst;
227		break;
228	case DMA_DEV_TO_MEM:
229		dma_dir = DMA_TO_DEVICE;
230		addr = fsl_chan->cfg.src_addr;
231		size = fsl_chan->cfg.src_maxburst;
232		break;
233	default:
234		dma_dir = DMA_NONE;
235		break;
236	}
237
238	/* Already mapped for this config? */
239	if (fsl_chan->dma_dir == dma_dir)
240		return true;
241
242	fsl_edma_unprep_slave_dma(fsl_chan);
243
244	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
245	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
246		return false;
247	fsl_chan->dma_dev_size = size;
248	fsl_chan->dma_dir = dma_dir;
249
250	return true;
251}
252
253int fsl_edma_slave_config(struct dma_chan *chan,
254				 struct dma_slave_config *cfg)
255{
256	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
257
258	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
259	fsl_edma_unprep_slave_dma(fsl_chan);
260
261	return 0;
262}
263EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
264
265static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
266		struct virt_dma_desc *vdesc, bool in_progress)
267{
268	struct fsl_edma_desc *edesc = fsl_chan->edesc;
269	struct edma_regs *regs = &fsl_chan->edma->regs;
270	u32 ch = fsl_chan->vchan.chan.chan_id;
271	enum dma_transfer_direction dir = edesc->dirn;
272	dma_addr_t cur_addr, dma_addr;
273	size_t len, size;
274	int i;
275
276	/* calculate the total size in this desc */
277	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
278		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
279			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
280
281	if (!in_progress)
282		return len;
283
284	if (dir == DMA_MEM_TO_DEV)
285		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
286	else
287		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
288
289	/* find the in-flight TCD and compute the remaining residue */
290	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
291		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
292			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
293		if (dir == DMA_MEM_TO_DEV)
294			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
295		else
296			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
297
298		len -= size;
299		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
300			len += dma_addr + size - cur_addr;
301			break;
302		}
303	}
304
305	return len;
306}
307
308enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
309		dma_cookie_t cookie, struct dma_tx_state *txstate)
310{
311	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
312	struct virt_dma_desc *vdesc;
313	enum dma_status status;
314	unsigned long flags;
315
316	status = dma_cookie_status(chan, cookie, txstate);
317	if (status == DMA_COMPLETE)
318		return status;
319
320	if (!txstate)
321		return fsl_chan->status;
322
323	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
324	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
325	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
326		txstate->residue =
327			fsl_edma_desc_residue(fsl_chan, vdesc, true);
328	else if (vdesc)
329		txstate->residue =
330			fsl_edma_desc_residue(fsl_chan, vdesc, false);
331	else
332		txstate->residue = 0;
333
334	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
335
336	return fsl_chan->status;
337}
338EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
339
340static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
341				  struct fsl_edma_hw_tcd *tcd)
342{
343	struct fsl_edma_engine *edma = fsl_chan->edma;
344	struct edma_regs *regs = &fsl_chan->edma->regs;
345	u32 ch = fsl_chan->vchan.chan.chan_id;
346
347	/*
348	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little-
349	 * endian format. However, the TCD registers must be loaded in
350	 * big- or little-endian order, matching the eDMA engine model.
351	 */
352	edma_writew(edma, 0,  &regs->tcd[ch].csr);
353	edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
354	edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
355
356	edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
357	edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
358
359	edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
360	edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
361
362	edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
363	edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
364	edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
365
366	edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
367			&regs->tcd[ch].dlast_sga);
368
369	edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
370}
371
372static inline
373void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
374		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
375		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
376		       bool disable_req, bool enable_sg)
377{
378	u16 csr = 0;
379
380	/*
381	 * eDMA hardware SGs require the TCDs to be stored in little-
382	 * endian format irrespective of the register endian model, so
383	 * the values are kept little endian in memory and
384	 * fsl_edma_set_tcd_regs performs the swap when loading them.
385	 */
386	tcd->saddr = cpu_to_le32(src);
387	tcd->daddr = cpu_to_le32(dst);
388
389	tcd->attr = cpu_to_le16(attr);
390
391	tcd->soff = cpu_to_le16(soff);
392
393	tcd->nbytes = cpu_to_le32(nbytes);
394	tcd->slast = cpu_to_le32(slast);
395
396	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
397	tcd->doff = cpu_to_le16(doff);
398
399	tcd->dlast_sga = cpu_to_le32(dlast_sga);
400
401	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
402	if (major_int)
403		csr |= EDMA_TCD_CSR_INT_MAJOR;
404
405	if (disable_req)
406		csr |= EDMA_TCD_CSR_D_REQ;
407
408	if (enable_sg)
409		csr |= EDMA_TCD_CSR_E_SG;
410
411	tcd->csr = cpu_to_le16(csr);
412}
413
414static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
415		int sg_len)
416{
417	struct fsl_edma_desc *fsl_desc;
418	int i;
419
420	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
421	if (!fsl_desc)
422		return NULL;
423
424	fsl_desc->echan = fsl_chan;
425	fsl_desc->n_tcds = sg_len;
426	for (i = 0; i < sg_len; i++) {
427		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
428					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
429		if (!fsl_desc->tcd[i].vtcd)
430			goto err;
431	}
432	return fsl_desc;
433
434err:
435	while (--i >= 0)
436		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
437				fsl_desc->tcd[i].ptcd);
438	kfree(fsl_desc);
439	return NULL;
440}
441
442struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
443		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
444		size_t period_len, enum dma_transfer_direction direction,
445		unsigned long flags)
446{
447	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
448	struct fsl_edma_desc *fsl_desc;
449	dma_addr_t dma_buf_next;
450	int sg_len, i;
451	u32 src_addr, dst_addr, last_sg, nbytes;
452	u16 soff, doff, iter;
453
454	if (!is_slave_direction(direction))
455		return NULL;
456
457	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
458		return NULL;
459
460	sg_len = buf_len / period_len;
461	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
462	if (!fsl_desc)
463		return NULL;
464	fsl_desc->iscyclic = true;
465	fsl_desc->dirn = direction;
466
467	dma_buf_next = dma_addr;
468	if (direction == DMA_MEM_TO_DEV) {
469		fsl_chan->attr =
470			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
471		nbytes = fsl_chan->cfg.dst_addr_width *
472			fsl_chan->cfg.dst_maxburst;
473	} else {
474		fsl_chan->attr =
475			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
476		nbytes = fsl_chan->cfg.src_addr_width *
477			fsl_chan->cfg.src_maxburst;
478	}
479
480	iter = period_len / nbytes;
481
482	for (i = 0; i < sg_len; i++) {
483		if (dma_buf_next >= dma_addr + buf_len)
484			dma_buf_next = dma_addr;
485
486		/* get next sg's physical address */
487		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
488
489		if (direction == DMA_MEM_TO_DEV) {
490			src_addr = dma_buf_next;
491			dst_addr = fsl_chan->dma_dev_addr;
492			soff = fsl_chan->cfg.dst_addr_width;
493			doff = 0;
494		} else {
495			src_addr = fsl_chan->dma_dev_addr;
496			dst_addr = dma_buf_next;
497			soff = 0;
498			doff = fsl_chan->cfg.src_addr_width;
499		}
500
501		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
502				  fsl_chan->attr, soff, nbytes, 0, iter,
503				  iter, doff, last_sg, true, false, true);
504		dma_buf_next += period_len;
505	}
506
507	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
508}
509EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
510
511struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
512		struct dma_chan *chan, struct scatterlist *sgl,
513		unsigned int sg_len, enum dma_transfer_direction direction,
514		unsigned long flags, void *context)
515{
516	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
517	struct fsl_edma_desc *fsl_desc;
518	struct scatterlist *sg;
519	u32 src_addr, dst_addr, last_sg, nbytes;
520	u16 soff, doff, iter;
521	int i;
522
523	if (!is_slave_direction(direction))
524		return NULL;
525
526	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
527		return NULL;
528
529	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
530	if (!fsl_desc)
531		return NULL;
532	fsl_desc->iscyclic = false;
533	fsl_desc->dirn = direction;
534
535	if (direction == DMA_MEM_TO_DEV) {
536		fsl_chan->attr =
537			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
538		nbytes = fsl_chan->cfg.dst_addr_width *
539			fsl_chan->cfg.dst_maxburst;
540	} else {
541		fsl_chan->attr =
542			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
543		nbytes = fsl_chan->cfg.src_addr_width *
544			fsl_chan->cfg.src_maxburst;
545	}
546
547	for_each_sg(sgl, sg, sg_len, i) {
548		/* get next sg's physical address */
549		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
550
551		if (direction == DMA_MEM_TO_DEV) {
552			src_addr = sg_dma_address(sg);
553			dst_addr = fsl_chan->dma_dev_addr;
554			soff = fsl_chan->cfg.dst_addr_width;
555			doff = 0;
556		} else {
557			src_addr = fsl_chan->dma_dev_addr;
558			dst_addr = sg_dma_address(sg);
559			soff = 0;
560			doff = fsl_chan->cfg.src_addr_width;
561		}
562
563		iter = sg_dma_len(sg) / nbytes;
564		if (i < sg_len - 1) {
565			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
566			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
567					  dst_addr, fsl_chan->attr, soff,
568					  nbytes, 0, iter, iter, doff, last_sg,
569					  false, false, true);
570		} else {
571			last_sg = 0;
572			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
573					  dst_addr, fsl_chan->attr, soff,
574					  nbytes, 0, iter, iter, doff, last_sg,
575					  true, true, false);
576		}
577	}
578
579	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
580}
581EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
582
583void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
584{
585	struct virt_dma_desc *vdesc;
586
587	vdesc = vchan_next_desc(&fsl_chan->vchan);
588	if (!vdesc)
589		return;
590	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
591	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
592	fsl_edma_enable_request(fsl_chan);
593	fsl_chan->status = DMA_IN_PROGRESS;
594	fsl_chan->idle = false;
595}
596EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
597
598void fsl_edma_issue_pending(struct dma_chan *chan)
599{
600	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
601	unsigned long flags;
602
603	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
604
605	if (unlikely(fsl_chan->pm_state != RUNNING)) {
606		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
607		/* cannot submit due to suspend */
608		return;
609	}
610
611	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
612		fsl_edma_xfer_desc(fsl_chan);
613
614	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
615}
616EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
617
618int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
619{
620	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
621
622	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
623				sizeof(struct fsl_edma_hw_tcd),
624				32, 0);
625	return 0;
626}
627EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
628
629void fsl_edma_free_chan_resources(struct dma_chan *chan)
630{
631	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
632	unsigned long flags;
633	LIST_HEAD(head);
634
635	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
636	fsl_edma_disable_request(fsl_chan);
637	fsl_edma_chan_mux(fsl_chan, 0, false);
638	fsl_chan->edesc = NULL;
639	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
640	fsl_edma_unprep_slave_dma(fsl_chan);
641	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
642
643	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
644	dma_pool_destroy(fsl_chan->tcd_pool);
645	fsl_chan->tcd_pool = NULL;
646}
647EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
648
649void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
650{
651	struct fsl_edma_chan *chan, *_chan;
652
653	list_for_each_entry_safe(chan, _chan,
654				&dmadev->channels, vchan.chan.device_node) {
655		list_del(&chan->vchan.chan.device_node);
656		tasklet_kill(&chan->vchan.task);
657	}
658}
659EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
660
661/*
662 * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
663 * the register offsets differ from those of the 64-channel ColdFire
664 * mcf5441x eDMA (here called "v2").
665 *
666 * This function sets up the register offsets for the declared version,
667 * so it must be called from xxx_edma_probe() just after setting the
668 * edma "version" and "membase" appropriately.
669 */
670void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
671{
672	edma->regs.cr = edma->membase + EDMA_CR;
673	edma->regs.es = edma->membase + EDMA_ES;
674	edma->regs.erql = edma->membase + EDMA_ERQ;
675	edma->regs.eeil = edma->membase + EDMA_EEI;
676
677	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
678			EDMA64_SERQ : EDMA_SERQ);
679	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
680			EDMA64_CERQ : EDMA_CERQ);
681	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
682			EDMA64_SEEI : EDMA_SEEI);
683	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
684			EDMA64_CEEI : EDMA_CEEI);
685	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
686			EDMA64_CINT : EDMA_CINT);
687	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
688			EDMA64_CERR : EDMA_CERR);
689	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
690			EDMA64_SSRT : EDMA_SSRT);
691	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
692			EDMA64_CDNE : EDMA_CDNE);
693	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
694			EDMA64_INTL : EDMA_INTR);
695	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
696			EDMA64_ERRL : EDMA_ERR);
697
698	if (edma->drvdata->version == v2) {
699		edma->regs.erqh = edma->membase + EDMA64_ERQH;
700		edma->regs.eeih = edma->membase + EDMA64_EEIH;
701		edma->regs.errh = edma->membase + EDMA64_ERRH;
702		edma->regs.inth = edma->membase + EDMA64_INTH;
703	}
704
705	edma->regs.tcd = edma->membase + EDMA_TCD;
706}
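/*
 * Note: in this version the per-channel TCDs are reached through the
 * regs.tcd array based at membase + EDMA_TCD (0x1000); each hardware
 * TCD is 32 bytes, so channel ch is programmed at
 * membase + 0x1000 + ch * 32.
 */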
707EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
708
709MODULE_LICENSE("GPL v2");