/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	u32			phys;
	u32			hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	unsigned long		*bitmap;
	struct device		*dev;
	spinlock_t		lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	struct cpdma_desc __iomem	*head, *tail;
	int				count;
	void __iomem			*hdp, *cp, *rxfree;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
				int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	bitmap_size  = BITS_TO_LONGS(pool->num_desc) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap) {
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	} else {
		iounmap(pool->iomap);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
		  struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force dma_addr_t)desc -
			    (__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

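/*
 * Allocate a run of num_desc contiguous descriptors from the pool.  The
 * allocation bitmap is scanned under the pool lock; NULL is returned if
 * no large enough run of free descriptors is found.
 */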
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
{
	unsigned long flags;
	int index;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
					   num_desc, 0);
	if (index < pool->num_desc) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}

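/*
 * Allocate and initialise a controller instance.  The caller's parameter
 * block is copied and a descriptor pool is created from either dedicated
 * descriptor memory (desc_mem_phys) or coherent DMA memory.
 */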
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

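/*
 * Bring an idle controller to the active state: optionally soft-reset the
 * DMA engine, zero all head descriptor pointer and completion pointer
 * registers, enable tx/rx DMA and restart any channels already created.
 */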
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned long timeout = jiffies + HZ/10;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (time_before(jiffies, timeout)) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
		}
		WARN_ON(!time_before(jiffies, timeout));
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

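/*
 * Stop an active controller: tear down every channel, mask all tx/rx
 * interrupts and disable the tx/rx DMA state machines.
 */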
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

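/*
 * Release a controller: stop it if it is still active, destroy every
 * remaining channel, free the descriptor pool and the controller itself.
 */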
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_destroy(ctlr->channels[i]);
	}

	cpdma_desc_pool_destroy(ctlr->pool);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

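/*
 * Enable or disable the host-error interrupt on the controller and apply
 * the same setting to every channel's completion interrupt.
 */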
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
}

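/*
 * Create a tx or rx channel.  chan_num encodes the direction (rx channel
 * numbers are offset by CPDMA_MAX_CHANNELS); the head pointer, completion
 * pointer, interrupt mask and teardown registers are selected accordingly.
 */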
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;
	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

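/*
 * Append a descriptor to the channel queue; caller holds the channel lock.
 * On an empty queue the descriptor becomes head and tail and is written to
 * the head descriptor pointer.  Otherwise it is chained behind the current
 * tail and, if the hardware had already reached end-of-queue on that tail,
 * the queue is restarted and the event counted as misqueued.
 */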
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

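/*
 * Map a buffer for DMA, fill in a newly allocated descriptor (hardware
 * fields plus the software token/buffer/length consumed at completion
 * time) and queue it on the channel.  Lengths below the controller's
 * minimum packet size are rounded up before mapping.
 */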
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, gfp_t gfp_mask)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token      = (void *)desc_read(desc, sw_token);
	buff_dma   = desc_read(desc, sw_buffer);
	origlen    = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

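/*
 * Retire the descriptor at the head of the queue.  Returns -ENOENT if the
 * queue is empty and -EBUSY if the hardware still owns the head descriptor;
 * otherwise the completed descriptor's EOQ/teardown-complete bits are
 * returned.  An EOQ condition re-arms the head descriptor pointer before
 * the completion handler is called.
 */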
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}
	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	__cpdma_chan_free(chan, desc, outlen, status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

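/*
 * Activate an idle channel on an active controller: unmask its interrupt
 * and, if descriptors are already queued, hand the head of the queue back
 * to the hardware (replenishing the rx free-buffer count on rx channels).
 */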
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

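/*
 * Tear down an active channel: mask its interrupt, request a hardware
 * teardown and wait up to 100ms for it to complete, drain descriptors that
 * completed in the meantime, then release any still-queued descriptors
 * with a -ENOSYS completion status.
 */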
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned long		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = jiffies + HZ/10;	/* 100 msec */
	while (time_before(jiffies, timeout)) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		cpu_relax();
	}
	WARN_ON(!time_before(jiffies, timeout));
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

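/*
 * Read one field of the extended control/status registers.  Fails with
 * -ENOTSUPP when the controller has no extended registers, -EINVAL when it
 * is not active, -ENOENT for an unknown control and -EPERM when the
 * control is not readable.
 */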
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}