v3.15
  1/*
  2 * DMA controller driver for CSR SiRFprimaII
  3 *
  4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
  5 *
  6 * Licensed under GPLv2 or later.
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/dmaengine.h>
 11#include <linux/dma-mapping.h>
 12#include <linux/pm_runtime.h>
 13#include <linux/interrupt.h>
 14#include <linux/io.h>
 15#include <linux/slab.h>
 16#include <linux/of_irq.h>
 17#include <linux/of_address.h>
 18#include <linux/of_device.h>
 19#include <linux/of_platform.h>
 20#include <linux/clk.h>
 21#include <linux/of_dma.h>
 22#include <linux/sirfsoc_dma.h>
 23
 24#include "dmaengine.h"
 25
 26#define SIRFSOC_DMA_DESCRIPTORS                 16
 27#define SIRFSOC_DMA_CHANNELS                    16
 28
 29#define SIRFSOC_DMA_CH_ADDR                     0x00
 30#define SIRFSOC_DMA_CH_XLEN                     0x04
 31#define SIRFSOC_DMA_CH_YLEN                     0x08
 32#define SIRFSOC_DMA_CH_CTRL                     0x0C
 33
 34#define SIRFSOC_DMA_WIDTH_0                     0x100
 35#define SIRFSOC_DMA_CH_VALID                    0x140
 36#define SIRFSOC_DMA_CH_INT                      0x144
 37#define SIRFSOC_DMA_INT_EN                      0x148
 38#define SIRFSOC_DMA_INT_EN_CLR                  0x14C
 39#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
 40#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR            0x15C
 41
 42#define SIRFSOC_DMA_MODE_CTRL_BIT               4
 43#define SIRFSOC_DMA_DIR_CTRL_BIT                5
 44
 45/* xlen and dma_width registers are on a 4-byte boundary */
 46#define SIRFSOC_DMA_WORD_LEN                    4
 47
 48struct sirfsoc_dma_desc {
 49	struct dma_async_tx_descriptor	desc;
 50	struct list_head		node;
 51
 52	/* SiRFprimaII 2D-DMA parameters */
 53
 54	int             xlen;           /* DMA xlen */
 55	int             ylen;           /* DMA ylen */
 56	int             width;          /* DMA width */
 57	int             dir;
 58	bool            cyclic;         /* is loop DMA? */
 59	u32             addr;		/* DMA buffer address */
 60};
 61
 62struct sirfsoc_dma_chan {
 63	struct dma_chan			chan;
 64	struct list_head		free;
 65	struct list_head		prepared;
 66	struct list_head		queued;
 67	struct list_head		active;
 68	struct list_head		completed;
 69	unsigned long			happened_cyclic;
 70	unsigned long			completed_cyclic;
 71
 72	/* Lock for this structure */
 73	spinlock_t			lock;
 74
 75	int				mode;
 76};
 77
 78struct sirfsoc_dma_regs {
 79	u32				ctrl[SIRFSOC_DMA_CHANNELS];
 80	u32				interrupt_en;
 81};
 82
 83struct sirfsoc_dma {
 84	struct dma_device		dma;
 85	struct tasklet_struct		tasklet;
 86	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
 87	void __iomem			*base;
 88	int				irq;
 89	struct clk			*clk;
 90	bool				is_marco;
 91	struct sirfsoc_dma_regs		regs_save;
 92};
 93
 94#define DRV_NAME	"sirfsoc_dma"
 95
 96static int sirfsoc_dma_runtime_suspend(struct device *dev);
 97
 98/* Convert struct dma_chan to struct sirfsoc_dma_chan */
 99static inline
100struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
101{
102	return container_of(c, struct sirfsoc_dma_chan, chan);
103}
104
105/* Convert struct dma_chan to struct sirfsoc_dma */
106static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
107{
108	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
109	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
110}
111
112/* Execute all queued DMA descriptors */
113static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
114{
115	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
116	int cid = schan->chan.chan_id;
117	struct sirfsoc_dma_desc *sdesc = NULL;
118
119	/*
120	 * the lock is already held by the callers of this function, so we
121	 * don't take it again
122	 */
123
124	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
125		node);
126	/* Move the first queued descriptor to active list */
127	list_move_tail(&sdesc->node, &schan->active);
128
129	/* Start the DMA transfer */
130	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
131		cid * 4);
132	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
133		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
134		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
135	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
136		SIRFSOC_DMA_CH_XLEN);
137	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
138		SIRFSOC_DMA_CH_YLEN);
139	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
140		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
141
142	/*
143	 * writel has an implicit memory write barrier to make sure data is
144	 * flushed into memory before starting DMA
145	 */
146	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
147
148	if (sdesc->cyclic) {
149		writel((1 << cid) | 1 << (cid + 16) |
150			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
151			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
152		schan->happened_cyclic = schan->completed_cyclic = 0;
153	}
154}
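/*
 * Illustrative note (not from the original source): the channel address
 * register above is programmed with sdesc->addr >> 2, i.e. in 32-bit
 * words, so DMA buffers are assumed to be 4-byte aligned. For example,
 * addr = 0x80000100 is written as 0x20000040.
 */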
155
156/* Interrupt handler */
157static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
158{
159	struct sirfsoc_dma *sdma = data;
160	struct sirfsoc_dma_chan *schan;
161	struct sirfsoc_dma_desc *sdesc = NULL;
162	u32 is;
163	int ch;
164
165	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
166	while ((ch = fls(is) - 1) >= 0) {
167		is &= ~(1 << ch);
168		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
169		schan = &sdma->channels[ch];
170
171		spin_lock(&schan->lock);
172
173		sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
174			node);
175		if (!sdesc->cyclic) {
176			/* Execute queued descriptors */
177			list_splice_tail_init(&schan->active, &schan->completed);
178			if (!list_empty(&schan->queued))
179				sirfsoc_dma_execute(schan);
180		} else
181			schan->happened_cyclic++;
182
183		spin_unlock(&schan->lock);
184	}
185
186	/* Schedule tasklet */
187	tasklet_schedule(&sdma->tasklet);
188
189	return IRQ_HANDLED;
190}
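/*
 * Illustrative trace (not from the original source): the fls()-based loop
 * above services pending channels from the highest set bit downwards.
 * With is = 0x5 (channels 0 and 2 pending), fls(0x5) - 1 = 2 handles
 * channel 2 first, then fls(0x1) - 1 = 0 handles channel 0, and the loop
 * exits once is == 0 because fls(0) - 1 == -1.
 */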
191
192/* process completed descriptors */
193static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
194{
195	dma_cookie_t last_cookie = 0;
196	struct sirfsoc_dma_chan *schan;
197	struct sirfsoc_dma_desc *sdesc;
198	struct dma_async_tx_descriptor *desc;
199	unsigned long flags;
200	unsigned long happened_cyclic;
201	LIST_HEAD(list);
202	int i;
203
204	for (i = 0; i < sdma->dma.chancnt; i++) {
205		schan = &sdma->channels[i];
206
207		/* Get all completed descriptors */
208		spin_lock_irqsave(&schan->lock, flags);
209		if (!list_empty(&schan->completed)) {
210			list_splice_tail_init(&schan->completed, &list);
211			spin_unlock_irqrestore(&schan->lock, flags);
212
213			/* Execute callbacks and run dependencies */
214			list_for_each_entry(sdesc, &list, node) {
215				desc = &sdesc->desc;
216
217				if (desc->callback)
218					desc->callback(desc->callback_param);
219
220				last_cookie = desc->cookie;
221				dma_run_dependencies(desc);
222			}
223
224			/* Free descriptors */
225			spin_lock_irqsave(&schan->lock, flags);
226			list_splice_tail_init(&list, &schan->free);
227			schan->chan.completed_cookie = last_cookie;
228			spin_unlock_irqrestore(&schan->lock, flags);
229		} else {
230			/* for cyclic channel, desc is always in active list */
231			sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
232				node);
233
234			if (!sdesc || !sdesc->cyclic) {
235				/* without active cyclic DMA */
236				spin_unlock_irqrestore(&schan->lock, flags);
237				continue;
238			}
239
240			/* cyclic DMA */
241			happened_cyclic = schan->happened_cyclic;
242			spin_unlock_irqrestore(&schan->lock, flags);
243
244			desc = &sdesc->desc;
245			while (happened_cyclic != schan->completed_cyclic) {
246				if (desc->callback)
247					desc->callback(desc->callback_param);
248				schan->completed_cyclic++;
249			}
250		}
251	}
252}
253
254/* DMA Tasklet */
255static void sirfsoc_dma_tasklet(unsigned long data)
256{
257	struct sirfsoc_dma *sdma = (void *)data;
258
259	sirfsoc_dma_process_completed(sdma);
260}
261
262/* Submit descriptor to hardware */
263static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
264{
265	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
266	struct sirfsoc_dma_desc *sdesc;
267	unsigned long flags;
268	dma_cookie_t cookie;
269
270	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
271
272	spin_lock_irqsave(&schan->lock, flags);
273
274	/* Move descriptor to queue */
275	list_move_tail(&sdesc->node, &schan->queued);
276
277	cookie = dma_cookie_assign(txd);
278
279	spin_unlock_irqrestore(&schan->lock, flags);
280
281	return cookie;
282}
283
284static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
285	struct dma_slave_config *config)
286{
287	unsigned long flags;
288
289	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
290		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
291		return -EINVAL;
292
293	spin_lock_irqsave(&schan->lock, flags);
294	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
295	spin_unlock_irqrestore(&schan->lock, flags);
296
297	return 0;
298}
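/*
 * Minimal client-side sketch (an illustration, not part of this driver;
 * the helper name is hypothetical): per the checks above, a slave must
 * use 4-byte bus widths, and src_maxburst == 4 selects burst mode.
 */
#if 0	/* illustrative only */
static int example_configure(struct dma_chan *chan, dma_addr_t dev_addr)
{
	struct dma_slave_config cfg = {
		.src_addr	= dev_addr,
		.dst_addr	= dev_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,	/* maps to schan->mode = 1 above */
	};

	return dmaengine_slave_config(chan, &cfg);
}
#endif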
299
300static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
301{
302	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
303	int cid = schan->chan.chan_id;
304	unsigned long flags;
305
306	spin_lock_irqsave(&schan->lock, flags);
307
308	if (!sdma->is_marco) {
309		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
310			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
311		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
312			& ~((1 << cid) | 1 << (cid + 16)),
313			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
314	} else {
315		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
316		writel_relaxed((1 << cid) | 1 << (cid + 16),
317			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
318	}
319
320	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
321
322	list_splice_tail_init(&schan->active, &schan->free);
323	list_splice_tail_init(&schan->queued, &schan->free);
324
325	spin_unlock_irqrestore(&schan->lock, flags);
326
327	return 0;
328}
329
330static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
331{
332	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
333	int cid = schan->chan.chan_id;
334	unsigned long flags;
335
336	spin_lock_irqsave(&schan->lock, flags);
337
338	if (!sdma->is_marco)
339		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
340			& ~((1 << cid) | 1 << (cid + 16)),
341			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
342	else
343		writel_relaxed((1 << cid) | 1 << (cid + 16),
344			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
345
346	spin_unlock_irqrestore(&schan->lock, flags);
347
348	return 0;
349}
350
351static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
352{
353	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
354	int cid = schan->chan.chan_id;
355	unsigned long flags;
356
357	spin_lock_irqsave(&schan->lock, flags);
358
359	if (!sdma->is_marco)
360		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
361			| ((1 << cid) | 1 << (cid + 16)),
362			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
363	else
364		writel_relaxed((1 << cid) | 1 << (cid + 16),
365			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
366
367	spin_unlock_irqrestore(&schan->lock, flags);
368
369	return 0;
370}
371
372static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
373	unsigned long arg)
374{
375	struct dma_slave_config *config;
376	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
377
378	switch (cmd) {
379	case DMA_PAUSE:
380		return sirfsoc_dma_pause_chan(schan);
381	case DMA_RESUME:
382		return sirfsoc_dma_resume_chan(schan);
383	case DMA_TERMINATE_ALL:
384		return sirfsoc_dma_terminate_all(schan);
385	case DMA_SLAVE_CONFIG:
386		config = (struct dma_slave_config *)arg;
387		return sirfsoc_dma_slave_config(schan, config);
388
389	default:
390		break;
391	}
392
393	return -ENOSYS;
394}
395
396/* Alloc channel resources */
397static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
398{
399	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
400	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
401	struct sirfsoc_dma_desc *sdesc;
402	unsigned long flags;
403	LIST_HEAD(descs);
404	int i;
405
406	pm_runtime_get_sync(sdma->dma.dev);
407
408	/* Alloc descriptors for this channel */
409	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
410		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
411		if (!sdesc) {
412			dev_notice(sdma->dma.dev, "Memory allocation error. "
413				"Allocated only %u descriptors\n", i);
414			break;
415		}
416
417		dma_async_tx_descriptor_init(&sdesc->desc, chan);
418		sdesc->desc.flags = DMA_CTRL_ACK;
419		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
420
421		list_add_tail(&sdesc->node, &descs);
422	}
423
424	/* Return error only if no descriptors were allocated */
425	if (i == 0)
426		return -ENOMEM;
427
428	spin_lock_irqsave(&schan->lock, flags);
429
430	list_splice_tail_init(&descs, &schan->free);
431	spin_unlock_irqrestore(&schan->lock, flags);
432
433	return i;
434}
435
436/* Free channel resources */
437static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
438{
439	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
440	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
441	struct sirfsoc_dma_desc *sdesc, *tmp;
442	unsigned long flags;
443	LIST_HEAD(descs);
444
445	spin_lock_irqsave(&schan->lock, flags);
446
447	/* Channel must be idle */
448	BUG_ON(!list_empty(&schan->prepared));
449	BUG_ON(!list_empty(&schan->queued));
450	BUG_ON(!list_empty(&schan->active));
451	BUG_ON(!list_empty(&schan->completed));
452
453	/* Move data */
454	list_splice_tail_init(&schan->free, &descs);
455
456	spin_unlock_irqrestore(&schan->lock, flags);
457
458	/* Free descriptors */
459	list_for_each_entry_safe(sdesc, tmp, &descs, node)
460		kfree(sdesc);
461
462	pm_runtime_put(sdma->dma.dev);
463}
464
465/* Send pending descriptor to hardware */
466static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
467{
468	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
469	unsigned long flags;
470
471	spin_lock_irqsave(&schan->lock, flags);
472
473	if (list_empty(&schan->active) && !list_empty(&schan->queued))
474		sirfsoc_dma_execute(schan);
475
476	spin_unlock_irqrestore(&schan->lock, flags);
477}
478
479/* Check request completion status */
480static enum dma_status
481sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
482	struct dma_tx_state *txstate)
483{
484	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
485	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
486	unsigned long flags;
487	enum dma_status ret;
488	struct sirfsoc_dma_desc *sdesc;
489	int cid = schan->chan.chan_id;
490	unsigned long dma_pos;
491	unsigned long dma_request_bytes;
492	unsigned long residue;
493
494	spin_lock_irqsave(&schan->lock, flags);
495
496	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
497			node);
498	dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
499		(sdesc->width * SIRFSOC_DMA_WORD_LEN);
500
501	ret = dma_cookie_status(chan, cookie, txstate);
502	dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
503		<< 2;
504	residue = dma_request_bytes - (dma_pos - sdesc->addr);
505	dma_set_residue(txstate, residue);
506
507	spin_unlock_irqrestore(&schan->lock, flags);
508
509	return ret;
510}
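/*
 * Worked example with illustrative numbers (not from the original
 * source): for xlen = 7, ylen = 3 and width = 8, the request covers
 * (7 + 1) * (3 + 1) * (8 * SIRFSOC_DMA_WORD_LEN) = 1024 bytes. If the
 * channel address register reads back (sdesc->addr >> 2) + 64, then
 * dma_pos - sdesc->addr = 256 and the reported residue is 768 bytes.
 */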
511
512static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
513	struct dma_chan *chan, struct dma_interleaved_template *xt,
514	unsigned long flags)
515{
516	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
517	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
518	struct sirfsoc_dma_desc *sdesc = NULL;
519	unsigned long iflags;
520	int ret;
521
522	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
523		ret = -EINVAL;
524		goto err_dir;
525	}
526
527	/* Get free descriptor */
528	spin_lock_irqsave(&schan->lock, iflags);
529	if (!list_empty(&schan->free)) {
530		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
531			node);
532		list_del(&sdesc->node);
533	}
534	spin_unlock_irqrestore(&schan->lock, iflags);
535
536	if (!sdesc) {
537		/* try to free completed descriptors */
538		sirfsoc_dma_process_completed(sdma);
539		ret = 0;
540		goto no_desc;
541	}
542
543	/* Place descriptor in prepared list */
544	spin_lock_irqsave(&schan->lock, iflags);
545
546	/*
547	 * Number of chunks in a frame can only be 1 for prima2
548	 * and ylen (number of frame - 1) must be at least 0
549	 */
550	if ((xt->frame_size == 1) && (xt->numf > 0)) {
551		sdesc->cyclic = 0;
552		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
553		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
554				SIRFSOC_DMA_WORD_LEN;
555		sdesc->ylen = xt->numf - 1;
556		if (xt->dir == DMA_MEM_TO_DEV) {
557			sdesc->addr = xt->src_start;
558			sdesc->dir = 1;
559		} else {
560			sdesc->addr = xt->dst_start;
561			sdesc->dir = 0;
562		}
563
564		list_add_tail(&sdesc->node, &schan->prepared);
565	} else {
566		pr_err("sirfsoc DMA: invalid xfer\n");
567		ret = -EINVAL;
568		goto err_xfer;
569	}
570	spin_unlock_irqrestore(&schan->lock, iflags);
571
572	return &sdesc->desc;
573err_xfer:
574	spin_unlock_irqrestore(&schan->lock, iflags);
575no_desc:
576err_dir:
577	return ERR_PTR(ret);
578}
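/*
 * Client-side sketch (an assumption for illustration; names are
 * hypothetical): prima2 accepts one chunk per frame, so frame_size must
 * be 1 and numf at least 1, with sizes in multiples of
 * SIRFSOC_DMA_WORD_LEN. The template can be freed after the prep call
 * since this driver copies its fields into the descriptor.
 */
#if 0	/* illustrative only */
static struct dma_async_tx_descriptor *
example_prep_2d(struct dma_chan *chan, dma_addr_t buf)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = buf;
	xt->dir = DMA_MEM_TO_DEV;
	xt->numf = 16;			/* 16 frames -> ylen = 15 */
	xt->frame_size = 1;		/* required by this driver */
	xt->sgl[0].size = 64;		/* xlen = 64 / 4 = 16 words */
	xt->sgl[0].icg = 0;		/* width = (64 + 0) / 4 words */

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);
	return desc;
}
#endif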
579
580static struct dma_async_tx_descriptor *
581sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
582	size_t buf_len, size_t period_len,
583	enum dma_transfer_direction direction, unsigned long flags, void *context)
584{
585	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
586	struct sirfsoc_dma_desc *sdesc = NULL;
587	unsigned long iflags;
588
589	/*
590	 * we only support cyclic transfers with 2 periods.
591	 * If the X-length is set to 0, it would be the loop mode.
592	 * The DMA address keeps increasing until reaching the end of a loop
593	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
594	 * the DMA address goes back to the beginning of this area.
595	 * In loop mode, the DMA data region is divided into two parts, BUFA
596	 * and BUFB. The DMA controller generates interrupts twice in each
597	 * loop: when the DMA address reaches the end of BUFA and when it
598	 * reaches the end of BUFB.
599	 */
600	if (buf_len != 2 * period_len)
601		return ERR_PTR(-EINVAL);
602
603	/* Get free descriptor */
604	spin_lock_irqsave(&schan->lock, iflags);
605	if (!list_empty(&schan->free)) {
606		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
607			node);
608		list_del(&sdesc->node);
609	}
610	spin_unlock_irqrestore(&schan->lock, iflags);
611
612	if (!sdesc)
613		return NULL;
614
615	/* Place descriptor in prepared list */
616	spin_lock_irqsave(&schan->lock, iflags);
617	sdesc->addr = addr;
618	sdesc->cyclic = 1;
619	sdesc->xlen = 0;
620	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
621	sdesc->width = 1;
622	list_add_tail(&sdesc->node, &schan->prepared);
623	spin_unlock_irqrestore(&schan->lock, iflags);
624
625	return &sdesc->desc;
626}
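/*
 * Client-side sketch (illustrative, hypothetical helper): the driver
 * only accepts a double-buffered loop, so period_len must be exactly
 * half of buf_len; an interrupt then fires at the end of each half
 * (BUFA and BUFB).
 */
#if 0	/* illustrative only */
static struct dma_async_tx_descriptor *
example_prep_loop(struct dma_chan *chan, dma_addr_t buf, size_t buf_len)
{
	/* buf_len must be 2 * period_len and a multiple of 4 bytes */
	return dmaengine_prep_dma_cyclic(chan, buf, buf_len, buf_len / 2,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
}
#endif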
627
628/*
629 * The DMA controller consists of 16 independent DMA channels.
630 * Each channel is allocated to a different function.
631 */
632bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
633{
634	unsigned int ch_nr = (unsigned int) chan_id;
635
636	if (ch_nr == chan->chan_id +
637		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
638		return true;
639
640	return false;
641}
642EXPORT_SYMBOL(sirfsoc_dma_filter_id);
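/*
 * Sketch of requesting a channel through the exported filter
 * (illustrative; the wrapper is hypothetical). The id encodes
 * chan_id + dev_id * SIRFSOC_DMA_CHANNELS, matching the check above.
 */
#if 0	/* illustrative only */
static struct dma_chan *example_request(unsigned int global_ch)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, sirfsoc_dma_filter_id,
				   (void *)(unsigned long)global_ch);
}
#endif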
643
644#define SIRFSOC_DMA_BUSWIDTHS \
645	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
646	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
647	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
648	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
649	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
650
651static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
652	struct dma_slave_caps *caps)
653{
654	caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
655	caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
656	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
657	caps->cmd_pause = true;
658	caps->cmd_terminate = true;
659
660	return 0;
661}
662
663static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
664	struct of_dma *ofdma)
665{
666	struct sirfsoc_dma *sdma = ofdma->of_dma_data;
667	unsigned int request = dma_spec->args[0];
668
669	if (request >= SIRFSOC_DMA_CHANNELS)
670		return NULL;
671
672	return dma_get_slave_channel(&sdma->channels[request].chan);
673}
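/*
 * Device-tree usage sketch (assumed binding, for illustration): with
 * this xlate a client node names a channel by request number, e.g.
 *
 *	dmas = <&dmac0 12>;
 *	dma-names = "rx";
 *
 * and of_dma_sirfsoc_xlate() hands back channel 12 of that controller.
 */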
674
675static int sirfsoc_dma_probe(struct platform_device *op)
676{
677	struct device_node *dn = op->dev.of_node;
678	struct device *dev = &op->dev;
679	struct dma_device *dma;
680	struct sirfsoc_dma *sdma;
681	struct sirfsoc_dma_chan *schan;
682	struct resource res;
683	ulong regs_start, regs_size;
684	u32 id;
685	int ret, i;
686
687	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
688	if (!sdma) {
689		dev_err(dev, "Memory exhausted!\n");
690		return -ENOMEM;
691	}
692
693	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
694		sdma->is_marco = true;
695
696	if (of_property_read_u32(dn, "cell-index", &id)) {
697		dev_err(dev, "Failed to get DMAC index\n");
698		return -ENODEV;
699	}
700
701	sdma->irq = irq_of_parse_and_map(dn, 0);
702	if (sdma->irq == NO_IRQ) {
703		dev_err(dev, "Error mapping IRQ!\n");
704		return -EINVAL;
705	}
706
707	sdma->clk = devm_clk_get(dev, NULL);
708	if (IS_ERR(sdma->clk)) {
709		dev_err(dev, "failed to get a clock.\n");
710		return PTR_ERR(sdma->clk);
711	}
712
713	ret = of_address_to_resource(dn, 0, &res);
714	if (ret) {
715		dev_err(dev, "Error parsing memory region!\n");
716		goto irq_dispose;
717	}
718
719	regs_start = res.start;
720	regs_size = resource_size(&res);
721
722	sdma->base = devm_ioremap(dev, regs_start, regs_size);
723	if (!sdma->base) {
724		dev_err(dev, "Error mapping memory region!\n");
725		ret = -ENOMEM;
726		goto irq_dispose;
727	}
728
729	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
730	if (ret) {
731		dev_err(dev, "Error requesting IRQ!\n");
732		ret = -EINVAL;
733		goto irq_dispose;
734	}
735
736	dma = &sdma->dma;
737	dma->dev = dev;
738	dma->chancnt = SIRFSOC_DMA_CHANNELS;
739
740	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
741	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
742	dma->device_issue_pending = sirfsoc_dma_issue_pending;
743	dma->device_control = sirfsoc_dma_control;
744	dma->device_tx_status = sirfsoc_dma_tx_status;
745	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
746	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
747	dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
748
749	INIT_LIST_HEAD(&dma->channels);
750	dma_cap_set(DMA_SLAVE, dma->cap_mask);
751	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
752	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
753	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
754
755	for (i = 0; i < dma->chancnt; i++) {
756		schan = &sdma->channels[i];
757
758		schan->chan.device = dma;
759		dma_cookie_init(&schan->chan);
760
761		INIT_LIST_HEAD(&schan->free);
762		INIT_LIST_HEAD(&schan->prepared);
763		INIT_LIST_HEAD(&schan->queued);
764		INIT_LIST_HEAD(&schan->active);
765		INIT_LIST_HEAD(&schan->completed);
766
767		spin_lock_init(&schan->lock);
768		list_add_tail(&schan->chan.device_node, &dma->channels);
769	}
770
771	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
772
773	/* Register DMA engine */
774	dev_set_drvdata(dev, sdma);
775
776	ret = dma_async_device_register(dma);
777	if (ret)
778		goto free_irq;
779
780	/* Device-tree DMA controller registration */
781	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
782	if (ret) {
783		dev_err(dev, "failed to register DMA controller\n");
784		goto unreg_dma_dev;
785	}
786
787	pm_runtime_enable(&op->dev);
788	dev_info(dev, "initialized SIRFSOC DMAC driver\n");
789
790	return 0;
791
792unreg_dma_dev:
793	dma_async_device_unregister(dma);
794free_irq:
795	free_irq(sdma->irq, sdma);
796irq_dispose:
797	irq_dispose_mapping(sdma->irq);
798	return ret;
799}
800
801static int sirfsoc_dma_remove(struct platform_device *op)
802{
803	struct device *dev = &op->dev;
804	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
805
806	of_dma_controller_free(op->dev.of_node);
807	dma_async_device_unregister(&sdma->dma);
808	free_irq(sdma->irq, sdma);
809	irq_dispose_mapping(sdma->irq);
810	pm_runtime_disable(&op->dev);
811	if (!pm_runtime_status_suspended(&op->dev))
812		sirfsoc_dma_runtime_suspend(&op->dev);
813
814	return 0;
815}
816
817static int sirfsoc_dma_runtime_suspend(struct device *dev)
818{
819	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
820
821	clk_disable_unprepare(sdma->clk);
822	return 0;
823}
824
825static int sirfsoc_dma_runtime_resume(struct device *dev)
826{
827	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
828	int ret;
829
830	ret = clk_prepare_enable(sdma->clk);
831	if (ret < 0) {
832		dev_err(dev, "clk_enable failed: %d\n", ret);
833		return ret;
834	}
835	return 0;
836}
837
838static int sirfsoc_dma_pm_suspend(struct device *dev)
839{
840	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
841	struct sirfsoc_dma_regs *save = &sdma->regs_save;
842	struct sirfsoc_dma_desc *sdesc;
843	struct sirfsoc_dma_chan *schan;
844	int ch;
845	int ret;
846
847	/*
848	 * if we were runtime-suspended before, resume to enable clock
849	 * before accessing register
850	 */
851	if (pm_runtime_status_suspended(dev)) {
852		ret = sirfsoc_dma_runtime_resume(dev);
853		if (ret < 0)
854			return ret;
855	}
856
857	/*
858	 * DMA controller will lose all registers while suspending
859	 * so we need to save registers for active channels
860	 */
861	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
862		schan = &sdma->channels[ch];
863		if (list_empty(&schan->active))
864			continue;
865		sdesc = list_first_entry(&schan->active,
866			struct sirfsoc_dma_desc,
867			node);
868		save->ctrl[ch] = readl_relaxed(sdma->base +
869			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
870	}
871	save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);
872
873	/* Disable clock */
874	sirfsoc_dma_runtime_suspend(dev);
875
876	return 0;
877}
878
879static int sirfsoc_dma_pm_resume(struct device *dev)
880{
881	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
882	struct sirfsoc_dma_regs *save = &sdma->regs_save;
883	struct sirfsoc_dma_desc *sdesc;
884	struct sirfsoc_dma_chan *schan;
885	int ch;
886	int ret;
887
888	/* Enable clock before accessing register */
889	ret = sirfsoc_dma_runtime_resume(dev);
890	if (ret < 0)
891		return ret;
892
893	writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
894	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
895		schan = &sdma->channels[ch];
896		if (list_empty(&schan->active))
897			continue;
898		sdesc = list_first_entry(&schan->active,
899			struct sirfsoc_dma_desc,
900			node);
901		writel_relaxed(sdesc->width,
902			sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
903		writel_relaxed(sdesc->xlen,
904			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
905		writel_relaxed(sdesc->ylen,
906			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
907		writel_relaxed(save->ctrl[ch],
908			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
909		writel_relaxed(sdesc->addr >> 2,
910			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
911	}
912
913	/* if we were runtime-suspended before, suspend again */
914	if (pm_runtime_status_suspended(dev))
915		sirfsoc_dma_runtime_suspend(dev);
916
917	return 0;
918}
919
920static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
921	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
922	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
923};
924
925static struct of_device_id sirfsoc_dma_match[] = {
926	{ .compatible = "sirf,prima2-dmac", },
927	{ .compatible = "sirf,marco-dmac", },
928	{},
929};
930
931static struct platform_driver sirfsoc_dma_driver = {
932	.probe		= sirfsoc_dma_probe,
933	.remove		= sirfsoc_dma_remove,
934	.driver = {
935		.name = DRV_NAME,
936		.owner = THIS_MODULE,
937		.pm = &sirfsoc_dma_pm_ops,
938		.of_match_table	= sirfsoc_dma_match,
939	},
940};
941
942static __init int sirfsoc_dma_init(void)
943{
944	return platform_driver_register(&sirfsoc_dma_driver);
945}
946
947static void __exit sirfsoc_dma_exit(void)
948{
949	platform_driver_unregister(&sirfsoc_dma_driver);
950}
951
952subsys_initcall(sirfsoc_dma_init);
953module_exit(sirfsoc_dma_exit);
954
955MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
956	"Barry Song <baohua.song@csr.com>");
957MODULE_DESCRIPTION("SIRFSOC DMA control driver");
958MODULE_LICENSE("GPL v2");
v4.17
   1/*
   2 * DMA controller driver for CSR SiRFprimaII
   3 *
   4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
   5 *
   6 * Licensed under GPLv2 or later.
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/dmaengine.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/pm_runtime.h>
  13#include <linux/interrupt.h>
  14#include <linux/io.h>
  15#include <linux/slab.h>
  16#include <linux/of_irq.h>
  17#include <linux/of_address.h>
  18#include <linux/of_device.h>
  19#include <linux/of_platform.h>
  20#include <linux/clk.h>
  21#include <linux/of_dma.h>
  22#include <linux/sirfsoc_dma.h>
  23
  24#include "dmaengine.h"
  25
  26#define SIRFSOC_DMA_VER_A7V1                    1
  27#define SIRFSOC_DMA_VER_A7V2                    2
  28#define SIRFSOC_DMA_VER_A6                      4
  29
  30#define SIRFSOC_DMA_DESCRIPTORS                 16
  31#define SIRFSOC_DMA_CHANNELS                    16
  32#define SIRFSOC_DMA_TABLE_NUM                   256
  33
  34#define SIRFSOC_DMA_CH_ADDR                     0x00
  35#define SIRFSOC_DMA_CH_XLEN                     0x04
  36#define SIRFSOC_DMA_CH_YLEN                     0x08
  37#define SIRFSOC_DMA_CH_CTRL                     0x0C
  38
  39#define SIRFSOC_DMA_WIDTH_0                     0x100
  40#define SIRFSOC_DMA_CH_VALID                    0x140
  41#define SIRFSOC_DMA_CH_INT                      0x144
  42#define SIRFSOC_DMA_INT_EN                      0x148
  43#define SIRFSOC_DMA_INT_EN_CLR                  0x14C
  44#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
  45#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR            0x154
  46#define SIRFSOC_DMA_WIDTH_ATLAS7                0x10
  47#define SIRFSOC_DMA_VALID_ATLAS7                0x14
  48#define SIRFSOC_DMA_INT_ATLAS7                  0x18
  49#define SIRFSOC_DMA_INT_EN_ATLAS7               0x1c
  50#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7            0x20
  51#define SIRFSOC_DMA_CUR_DATA_ADDR               0x34
  52#define SIRFSOC_DMA_MUL_ATLAS7                  0x38
  53#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7         0x158
  54#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7     0x15C
  55#define SIRFSOC_DMA_IOBG_SCMD_EN                0x800
  56#define SIRFSOC_DMA_EARLY_RESP_SET              0x818
  57#define SIRFSOC_DMA_EARLY_RESP_CLR              0x81C
  58
  59#define SIRFSOC_DMA_MODE_CTRL_BIT               4
  60#define SIRFSOC_DMA_DIR_CTRL_BIT                5
  61#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7        2
  62#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7       3
  63#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7         4
  64#define SIRFSOC_DMA_TAB_NUM_ATLAS7              7
  65#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7        5
  66#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7     25
  67#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT            32
  68
  69#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7         BIT(0)
  70#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7          BIT(1)
  71#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7          BIT(2)
  72#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7         BIT(3)
  73#define SIRFSOC_DMA_INT_INV_INT_ATLAS7          BIT(4)
  74#define SIRFSOC_DMA_INT_END_INT_ATLAS7          BIT(5)
  75#define SIRFSOC_DMA_INT_ALL_ATLAS7              0x3F
  76
  77/* xlen and dma_width registers are on a 4-byte boundary */
  78#define SIRFSOC_DMA_WORD_LEN                    4
  79#define SIRFSOC_DMA_XLEN_MAX_V1         0x800
  80#define SIRFSOC_DMA_XLEN_MAX_V2         0x1000
  81
  82struct sirfsoc_dma_desc {
  83	struct dma_async_tx_descriptor	desc;
  84	struct list_head		node;
  85
  86	/* SiRFprimaII 2D-DMA parameters */
  87
  88	int             xlen;           /* DMA xlen */
  89	int             ylen;           /* DMA ylen */
  90	int             width;          /* DMA width */
  91	int             dir;
  92	bool            cyclic;         /* is loop DMA? */
  93	bool            chain;          /* is chain DMA? */
  94	u32             addr;		/* DMA buffer address */
  95	u64 chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */
  96};
  97
  98struct sirfsoc_dma_chan {
  99	struct dma_chan			chan;
 100	struct list_head		free;
 101	struct list_head		prepared;
 102	struct list_head		queued;
 103	struct list_head		active;
 104	struct list_head		completed;
 105	unsigned long			happened_cyclic;
 106	unsigned long			completed_cyclic;
 107
 108	/* Lock for this structure */
 109	spinlock_t			lock;
 110
 111	int				mode;
 112};
 113
 114struct sirfsoc_dma_regs {
 115	u32				ctrl[SIRFSOC_DMA_CHANNELS];
 116	u32				interrupt_en;
 117};
 118
 119struct sirfsoc_dma {
 120	struct dma_device		dma;
 121	struct tasklet_struct		tasklet;
 122	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
 123	void __iomem			*base;
 124	int				irq;
 125	struct clk			*clk;
 126	int				type;
 127	void (*exec_desc)(struct sirfsoc_dma_desc *sdesc,
 128		int cid, int burst_mode, void __iomem *base);
 129	struct sirfsoc_dma_regs		regs_save;
 130};
 131
 132struct sirfsoc_dmadata {
 133	void (*exec)(struct sirfsoc_dma_desc *sdesc,
 134		int cid, int burst_mode, void __iomem *base);
 135	int type;
 136};
 137
 138enum sirfsoc_dma_chain_flag {
 139	SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
 140	SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
 141	SIRFSOC_DMA_CHAIN_LOOP = 0x03,
 142	SIRFSOC_DMA_CHAIN_END = 0x04
 143};
 144
 145#define DRV_NAME	"sirfsoc_dma"
 146
 147static int sirfsoc_dma_runtime_suspend(struct device *dev);
 148
 149/* Convert struct dma_chan to struct sirfsoc_dma_chan */
 150static inline
 151struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
 152{
 153	return container_of(c, struct sirfsoc_dma_chan, chan);
 154}
 155
 156/* Convert struct dma_chan to struct sirfsoc_dma */
 157static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
 158{
 159	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
 160	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
 161}
 162
 163static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc,
 164		int cid, int burst_mode, void __iomem *base)
 165{
 166	if (sdesc->chain) {
 167		/* DMA v2 HW chain mode */
 168		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
 169			       (sdesc->chain <<
 170				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
 171			       (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3,
 172			       base + SIRFSOC_DMA_CH_CTRL);
 173	} else {
 174		/* DMA v2 legacy mode */
 175		writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN);
 176		writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN);
 177		writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7);
 178	writel_relaxed(sdesc->width * ((sdesc->ylen + 1) >> 1),
 179				base + SIRFSOC_DMA_MUL_ATLAS7);
 180		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
 181			       (sdesc->chain <<
 182				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
 183			       0x3, base + SIRFSOC_DMA_CH_CTRL);
 184	}
 185	writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 :
 186		       (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 |
 187			SIRFSOC_DMA_INT_LOOP_INT_ATLAS7),
 188		       base + SIRFSOC_DMA_INT_EN_ATLAS7);
 189	writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR);
 190	if (sdesc->cyclic)
 191		writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
 192}
 193
 194static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc,
 195		int cid, int burst_mode, void __iomem *base)
 196{
 197	writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN);
 198	writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET);
 199	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
 200	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
 201		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
 202		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
 203	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
 204	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
 205	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
 206		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
 207	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
 208	if (sdesc->cyclic) {
 209		writel((1 << cid) | 1 << (cid + 16) |
 210		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7),
 211		       base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
 212	}
 213
 214}
 215
 216static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc,
 217		int cid, int burst_mode, void __iomem *base)
 218{
 219	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
 220	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
 221		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
 222		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
 223	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
 224	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
 225	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
 226		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
 227	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
 228	if (sdesc->cyclic) {
 229		writel((1 << cid) | 1 << (cid + 16) |
 230		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL),
 231		       base + SIRFSOC_DMA_CH_LOOP_CTRL);
 232	}
 233
 234}
 235
 236/* Execute all queued DMA descriptors */
 237static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
 238{
 239	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 240	int cid = schan->chan.chan_id;
 241	struct sirfsoc_dma_desc *sdesc = NULL;
 242	void __iomem *base;
 243
 244	/*
 245	 * the lock is already held by the callers of this function, so we
 246	 * don't take it again
 247	 */
 248	base = sdma->base;
 249	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
 250				 node);
 251	/* Move the first queued descriptor to active list */
 252	list_move_tail(&sdesc->node, &schan->active);
 253
 254	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
 255		cid = 0;
 256
 257	/* Start the DMA transfer */
 258	sdma->exec_desc(sdesc, cid, schan->mode, base);
 259
 260	if (sdesc->cyclic)
 261		schan->happened_cyclic = schan->completed_cyclic = 0;
 262}
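/*
 * Sketch of the per-variant match data this version dispatches on
 * (assumed shape, consistent with the probe code below that casts
 * of_match_device()->data to struct sirfsoc_dmadata): each compatible
 * string carries the exec hook and type that probe copies into
 * sdma->exec_desc and sdma->type.
 */
#if 0	/* illustrative only */
static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
	.exec = sirfsoc_dma_execute_hw_a6,
	.type = SIRFSOC_DMA_VER_A6,
};
#endif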
 263
 264/* Interrupt handler */
 265static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
 266{
 267	struct sirfsoc_dma *sdma = data;
 268	struct sirfsoc_dma_chan *schan;
 269	struct sirfsoc_dma_desc *sdesc = NULL;
 270	u32 is;
 271	bool chain;
 272	int ch;
 273	void __iomem *reg;
 274
 275	switch (sdma->type) {
 276	case SIRFSOC_DMA_VER_A6:
 277	case SIRFSOC_DMA_VER_A7V1:
 278		is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
 279		reg = sdma->base + SIRFSOC_DMA_CH_INT;
 280		while ((ch = fls(is) - 1) >= 0) {
 281			is &= ~(1 << ch);
 282			writel_relaxed(1 << ch, reg);
 283			schan = &sdma->channels[ch];
 284			spin_lock(&schan->lock);
 285			sdesc = list_first_entry(&schan->active,
 286						 struct sirfsoc_dma_desc, node);
 287			if (!sdesc->cyclic) {
 288				/* Execute queued descriptors */
 289				list_splice_tail_init(&schan->active,
 290						      &schan->completed);
 291				dma_cookie_complete(&sdesc->desc);
 292				if (!list_empty(&schan->queued))
 293					sirfsoc_dma_execute(schan);
 294			} else
 295				schan->happened_cyclic++;
 296			spin_unlock(&schan->lock);
 297		}
 298		break;
 299
 300	case SIRFSOC_DMA_VER_A7V2:
 301		is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7);
 302
 303		reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7;
 304		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg);
 305		schan = &sdma->channels[0];
 306		spin_lock(&schan->lock);
 307		sdesc = list_first_entry(&schan->active,
 308					 struct sirfsoc_dma_desc, node);
 309		if (!sdesc->cyclic) {
 310			chain = sdesc->chain;
 311			if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) ||
 312				(!chain &&
 313				(is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) {
 314				/* Execute queued descriptors */
 315				list_splice_tail_init(&schan->active,
 316						      &schan->completed);
 317				dma_cookie_complete(&sdesc->desc);
 318				if (!list_empty(&schan->queued))
 319					sirfsoc_dma_execute(schan);
 320			}
 321		} else if (sdesc->cyclic && (is &
 322					SIRFSOC_DMA_INT_LOOP_INT_ATLAS7))
 323			schan->happened_cyclic++;
 324
 325		spin_unlock(&schan->lock);
 326		break;
 327
 328	default:
 329		break;
 330	}
 331
 332	/* Schedule tasklet */
 333	tasklet_schedule(&sdma->tasklet);
 334
 335	return IRQ_HANDLED;
 336}
 337
 338/* process completed descriptors */
 339static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
 340{
 341	dma_cookie_t last_cookie = 0;
 342	struct sirfsoc_dma_chan *schan;
 343	struct sirfsoc_dma_desc *sdesc;
 344	struct dma_async_tx_descriptor *desc;
 345	unsigned long flags;
 346	unsigned long happened_cyclic;
 347	LIST_HEAD(list);
 348	int i;
 349
 350	for (i = 0; i < sdma->dma.chancnt; i++) {
 351		schan = &sdma->channels[i];
 352
 353		/* Get all completed descriptors */
 354		spin_lock_irqsave(&schan->lock, flags);
 355		if (!list_empty(&schan->completed)) {
 356			list_splice_tail_init(&schan->completed, &list);
 357			spin_unlock_irqrestore(&schan->lock, flags);
 358
 359			/* Execute callbacks and run dependencies */
 360			list_for_each_entry(sdesc, &list, node) {
 361				desc = &sdesc->desc;
 362
 363				dmaengine_desc_get_callback_invoke(desc, NULL);
 364				last_cookie = desc->cookie;
 365				dma_run_dependencies(desc);
 366			}
 367
 368			/* Free descriptors */
 369			spin_lock_irqsave(&schan->lock, flags);
 370			list_splice_tail_init(&list, &schan->free);
 371			schan->chan.completed_cookie = last_cookie;
 372			spin_unlock_irqrestore(&schan->lock, flags);
 373		} else {
 374			if (list_empty(&schan->active)) {
 375				spin_unlock_irqrestore(&schan->lock, flags);
 376				continue;
 377			}
 378
 379			/* for cyclic channel, desc is always in active list */
 380			sdesc = list_first_entry(&schan->active,
 381				struct sirfsoc_dma_desc, node);
 382
 383			/* cyclic DMA */
 384			happened_cyclic = schan->happened_cyclic;
 385			spin_unlock_irqrestore(&schan->lock, flags);
 386
 387			desc = &sdesc->desc;
 388			while (happened_cyclic != schan->completed_cyclic) {
 389				dmaengine_desc_get_callback_invoke(desc, NULL);
 390				schan->completed_cyclic++;
 391			}
 392		}
 393	}
 394}
 395
 396/* DMA Tasklet */
 397static void sirfsoc_dma_tasklet(unsigned long data)
 398{
 399	struct sirfsoc_dma *sdma = (void *)data;
 400
 401	sirfsoc_dma_process_completed(sdma);
 402}
 403
 404/* Submit descriptor to hardware */
 405static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
 406{
 407	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
 408	struct sirfsoc_dma_desc *sdesc;
 409	unsigned long flags;
 410	dma_cookie_t cookie;
 411
 412	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
 413
 414	spin_lock_irqsave(&schan->lock, flags);
 415
 416	/* Move descriptor to queue */
 417	list_move_tail(&sdesc->node, &schan->queued);
 418
 419	cookie = dma_cookie_assign(txd);
 420
 421	spin_unlock_irqrestore(&schan->lock, flags);
 422
 423	return cookie;
 424}
 425
 426static int sirfsoc_dma_slave_config(struct dma_chan *chan,
 427				    struct dma_slave_config *config)
 428{
 429	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 430	unsigned long flags;
 431
 432	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
 433		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
 434		return -EINVAL;
 435
 436	spin_lock_irqsave(&schan->lock, flags);
 437	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
 438	spin_unlock_irqrestore(&schan->lock, flags);
 439
 440	return 0;
 441}
 442
 443static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
 444{
 445	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 446	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 447	int cid = schan->chan.chan_id;
 448	unsigned long flags;
 449
 450	spin_lock_irqsave(&schan->lock, flags);
 451
 452	switch (sdma->type) {
 453	case SIRFSOC_DMA_VER_A7V1:
 454		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
 455		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
 456		writel_relaxed((1 << cid) | 1 << (cid + 16),
 457			       sdma->base +
 458			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
 459		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
 460		break;
 461	case SIRFSOC_DMA_VER_A7V2:
 462		writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
 463		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
 464			       sdma->base + SIRFSOC_DMA_INT_ATLAS7);
 465		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
 466		writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
 467		break;
 468	case SIRFSOC_DMA_VER_A6:
 469		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
 470			       ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
 471		writel_relaxed(readl_relaxed(sdma->base +
 472					     SIRFSOC_DMA_CH_LOOP_CTRL) &
 473			       ~((1 << cid) | 1 << (cid + 16)),
 474			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
 475		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
 476		break;
 477	default:
 478		break;
 479	}
 480
 481	list_splice_tail_init(&schan->active, &schan->free);
 482	list_splice_tail_init(&schan->queued, &schan->free);
 483
 484	spin_unlock_irqrestore(&schan->lock, flags);
 485
 486	return 0;
 487}
 488
 489static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
 490{
 491	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 492	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 493	int cid = schan->chan.chan_id;
 494	unsigned long flags;
 495
 496	spin_lock_irqsave(&schan->lock, flags);
 497
 498	switch (sdma->type) {
 499	case SIRFSOC_DMA_VER_A7V1:
 500		writel_relaxed((1 << cid) | 1 << (cid + 16),
 501			       sdma->base +
 502			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
 503		break;
 504	case SIRFSOC_DMA_VER_A7V2:
 505		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
 506		break;
 507	case SIRFSOC_DMA_VER_A6:
 508		writel_relaxed(readl_relaxed(sdma->base +
 509					     SIRFSOC_DMA_CH_LOOP_CTRL) &
 510			       ~((1 << cid) | 1 << (cid + 16)),
 511			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
 512		break;
 513
 514	default:
 515		break;
 516	}
 517
 518	spin_unlock_irqrestore(&schan->lock, flags);
 519
 520	return 0;
 521}
 522
 523static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
 524{
 525	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 526	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
 527	int cid = schan->chan.chan_id;
 528	unsigned long flags;
 529
 530	spin_lock_irqsave(&schan->lock, flags);
 531	switch (sdma->type) {
 532	case SIRFSOC_DMA_VER_A7V1:
 533		writel_relaxed((1 << cid) | 1 << (cid + 16),
 534			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
 535		break;
 536	case SIRFSOC_DMA_VER_A7V2:
 537		writel_relaxed(0x10001,
 538			       sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
 539		break;
 540	case SIRFSOC_DMA_VER_A6:
 541		writel_relaxed(readl_relaxed(sdma->base +
 542					     SIRFSOC_DMA_CH_LOOP_CTRL) |
 543			       ((1 << cid) | 1 << (cid + 16)),
 544			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
 545		break;
 546
 547	default:
 548		break;
 549	}
 550
 551	spin_unlock_irqrestore(&schan->lock, flags);
 552
 553	return 0;
 554}
 555
 556/* Alloc channel resources */
 557static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
 558{
 559	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
 560	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 561	struct sirfsoc_dma_desc *sdesc;
 562	unsigned long flags;
 563	LIST_HEAD(descs);
 564	int i;
 565
 566	pm_runtime_get_sync(sdma->dma.dev);
 567
 568	/* Alloc descriptors for this channel */
 569	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
 570		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
 571		if (!sdesc) {
 572			dev_notice(sdma->dma.dev, "Memory allocation error. "
 573				"Allocated only %u descriptors\n", i);
 574			break;
 575		}
 576
 577		dma_async_tx_descriptor_init(&sdesc->desc, chan);
 578		sdesc->desc.flags = DMA_CTRL_ACK;
 579		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
 580
 581		list_add_tail(&sdesc->node, &descs);
 582	}
 583
 584	/* Return error only if no descriptors were allocated */
 585	if (i == 0)
 586		return -ENOMEM;
 587
 588	spin_lock_irqsave(&schan->lock, flags);
 589
 590	list_splice_tail_init(&descs, &schan->free);
 591	spin_unlock_irqrestore(&schan->lock, flags);
 592
 593	return i;
 594}
 595
 596/* Free channel resources */
 597static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
 598{
 599	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 600	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
 601	struct sirfsoc_dma_desc *sdesc, *tmp;
 602	unsigned long flags;
 603	LIST_HEAD(descs);
 604
 605	spin_lock_irqsave(&schan->lock, flags);
 606
 607	/* Channel must be idle */
 608	BUG_ON(!list_empty(&schan->prepared));
 609	BUG_ON(!list_empty(&schan->queued));
 610	BUG_ON(!list_empty(&schan->active));
 611	BUG_ON(!list_empty(&schan->completed));
 612
 613	/* Move data */
 614	list_splice_tail_init(&schan->free, &descs);
 615
 616	spin_unlock_irqrestore(&schan->lock, flags);
 617
 618	/* Free descriptors */
 619	list_for_each_entry_safe(sdesc, tmp, &descs, node)
 620		kfree(sdesc);
 621
 622	pm_runtime_put(sdma->dma.dev);
 623}
 624
 625/* Send pending descriptor to hardware */
 626static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
 627{
 628	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 629	unsigned long flags;
 630
 631	spin_lock_irqsave(&schan->lock, flags);
 632
 633	if (list_empty(&schan->active) && !list_empty(&schan->queued))
 634		sirfsoc_dma_execute(schan);
 635
 636	spin_unlock_irqrestore(&schan->lock, flags);
 637}
 638
 639/* Check request completion status */
 640static enum dma_status
 641sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 642	struct dma_tx_state *txstate)
 643{
 644	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
 645	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 646	unsigned long flags;
 647	enum dma_status ret;
 648	struct sirfsoc_dma_desc *sdesc;
 649	int cid = schan->chan.chan_id;
 650	unsigned long dma_pos;
 651	unsigned long dma_request_bytes;
 652	unsigned long residue;
 653
 654	spin_lock_irqsave(&schan->lock, flags);
 655
 656	if (list_empty(&schan->active)) {
 657		ret = dma_cookie_status(chan, cookie, txstate);
 658		dma_set_residue(txstate, 0);
 659		spin_unlock_irqrestore(&schan->lock, flags);
 660		return ret;
 661	}
 662	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node);
 663	if (sdesc->cyclic)
 664		dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
 665			(sdesc->width * SIRFSOC_DMA_WORD_LEN);
 666	else
 667		dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN;
 668
 669	ret = dma_cookie_status(chan, cookie, txstate);
 670
 671	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
 672		cid = 0;
 673
 674	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
 675		dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR);
 676	} else {
 677		dma_pos = readl_relaxed(
 678			sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
 679	}
 680
 681	residue = dma_request_bytes - (dma_pos - sdesc->addr);
 682	dma_set_residue(txstate, residue);
 683
 684	spin_unlock_irqrestore(&schan->lock, flags);
 685
 686	return ret;
 687}
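/*
 * Worked example with illustrative numbers (not from the original
 * source): the request size is now computed per mode. A cyclic
 * descriptor with xlen = 0, ylen = 255 and width = 1 covers
 * (0 + 1) * (255 + 1) * (1 * SIRFSOC_DMA_WORD_LEN) = 1024 bytes, while
 * a non-cyclic one with xlen = 256 covers 256 * SIRFSOC_DMA_WORD_LEN =
 * 1024 bytes straight from xlen.
 */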
 688
 689static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
 690	struct dma_chan *chan, struct dma_interleaved_template *xt,
 691	unsigned long flags)
 692{
 693	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
 694	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 695	struct sirfsoc_dma_desc *sdesc = NULL;
 696	unsigned long iflags;
 697	int ret;
 698
 699	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
 700		ret = -EINVAL;
 701		goto err_dir;
 702	}
 703
 704	/* Get free descriptor */
 705	spin_lock_irqsave(&schan->lock, iflags);
 706	if (!list_empty(&schan->free)) {
 707		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
 708			node);
 709		list_del(&sdesc->node);
 710	}
 711	spin_unlock_irqrestore(&schan->lock, iflags);
 712
 713	if (!sdesc) {
 714		/* try to free completed descriptors */
 715		sirfsoc_dma_process_completed(sdma);
 716		ret = 0;
 717		goto no_desc;
 718	}
 719
 720	/* Place descriptor in prepared list */
 721	spin_lock_irqsave(&schan->lock, iflags);
 722
 723	/*
 724	 * Number of chunks in a frame can only be 1 for prima2
 725	 * and ylen (number of frames - 1) must be at least 0
 726	 */
 727	if ((xt->frame_size == 1) && (xt->numf > 0)) {
 728		sdesc->cyclic = 0;
 729		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
 730		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
 731				SIRFSOC_DMA_WORD_LEN;
 732		sdesc->ylen = xt->numf - 1;
 733		if (xt->dir == DMA_MEM_TO_DEV) {
 734			sdesc->addr = xt->src_start;
 735			sdesc->dir = 1;
 736		} else {
 737			sdesc->addr = xt->dst_start;
 738			sdesc->dir = 0;
 739		}
 740
 741		list_add_tail(&sdesc->node, &schan->prepared);
 742	} else {
 743		pr_err("sirfsoc DMA: invalid xfer\n");
 744		ret = -EINVAL;
 745		goto err_xfer;
 746	}
 747	spin_unlock_irqrestore(&schan->lock, iflags);
 748
 749	return &sdesc->desc;
 750err_xfer:
 751	spin_unlock_irqrestore(&schan->lock, iflags);
 752no_desc:
 753err_dir:
 754	return ERR_PTR(ret);
 755}
 756
 757static struct dma_async_tx_descriptor *
 758sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
 759	size_t buf_len, size_t period_len,
 760	enum dma_transfer_direction direction, unsigned long flags)
 761{
 762	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 763	struct sirfsoc_dma_desc *sdesc = NULL;
 764	unsigned long iflags;
 765
 766	/*
 767	 * we only support cyclic transfers with 2 periods.
 768	 * If the X-length is set to 0, it would be the loop mode.
 769	 * The DMA address keeps increasing until reaching the end of a loop
 770	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
 771	 * the DMA address goes back to the beginning of this area.
 772	 * In loop mode, the DMA data region is divided into two parts, BUFA
 773	 * and BUFB. The DMA controller generates interrupts twice in each
 774	 * loop: when the DMA address reaches the end of BUFA and when it
 775	 * reaches the end of BUFB.
 776	 */
 777	if (buf_len != 2 * period_len)
 778		return ERR_PTR(-EINVAL);
 779
 780	/* Get free descriptor */
 781	spin_lock_irqsave(&schan->lock, iflags);
 782	if (!list_empty(&schan->free)) {
 783		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
 784			node);
 785		list_del(&sdesc->node);
 786	}
 787	spin_unlock_irqrestore(&schan->lock, iflags);
 788
 789	if (!sdesc)
 790		return NULL;
 791
 792	/* Place descriptor in prepared list */
 793	spin_lock_irqsave(&schan->lock, iflags);
 794	sdesc->addr = addr;
 795	sdesc->cyclic = 1;
 796	sdesc->xlen = 0;
 797	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
 798	sdesc->width = 1;
 799	list_add_tail(&sdesc->node, &schan->prepared);
 800	spin_unlock_irqrestore(&schan->lock, iflags);
 801
 802	return &sdesc->desc;
 803}
 804
 805/*
 806 * The DMA controller consists of 16 independent DMA channels.
 807 * Each channel is allocated to a different function.
 808 */
 809bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
 810{
 811	unsigned int ch_nr = (unsigned int)(unsigned long)chan_id;
 812
 813	if (ch_nr == chan->chan_id +
 814		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
 815		return true;
 816
 817	return false;
 818}
 819EXPORT_SYMBOL(sirfsoc_dma_filter_id);
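
/*
 * A minimal allocation sketch, assuming the client knows its global
 * channel number (dev_id * SIRFSOC_DMA_CHANNELS + chan_id); the names
 * below are illustrative only:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *				   (void *)(unsigned long)ch_nr);
 */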
 820
 821#define SIRFSOC_DMA_BUSWIDTHS \
 822	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
 823	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 824	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 825	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
 826	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
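
/*
 * These capability masks are reported to client drivers through
 * dma_get_slave_caps(), which reads src_addr_widths, dst_addr_widths
 * and directions from the dma_device initialized in probe() below.
 */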
 827
 828static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
 829	struct of_dma *ofdma)
 830{
 831	struct sirfsoc_dma *sdma = ofdma->of_dma_data;
 832	unsigned int request = dma_spec->args[0];
 833
 834	if (request >= SIRFSOC_DMA_CHANNELS)
 835		return NULL;
 836
 837	return dma_get_slave_channel(&sdma->channels[request].chan);
 838}
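
/*
 * Device-tree usage sketch (illustrative, not a binding document):
 * the controller node is expected to use #dma-cells = <1>, the single
 * cell being the channel number consumed as args[0] above.  A
 * hypothetical client then references it as:
 *
 *	dmac0: dma-controller@... {
 *		compatible = "sirf,prima2-dmac";
 *		#dma-cells = <1>;
 *	};
 *
 *	client@... {
 *		dmas = <&dmac0 2>;
 *		dma-names = "rx";
 *	};
 */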
 839
 840static int sirfsoc_dma_probe(struct platform_device *op)
 841{
 842	struct device_node *dn = op->dev.of_node;
 843	struct device *dev = &op->dev;
 844	struct dma_device *dma;
 845	struct sirfsoc_dma *sdma;
 846	struct sirfsoc_dma_chan *schan;
 847	struct sirfsoc_dmadata *data;
	const struct of_device_id *match;
 848	struct resource res;
 849	ulong regs_start, regs_size;
 850	u32 id;
 851	int ret, i;
 852
 853	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
 854	if (!sdma)
 855		return -ENOMEM;
 856
 857	match = of_match_device(op->dev.driver->of_match_table, &op->dev);
 858	if (!match)
 859		return -ENODEV;
	data = (struct sirfsoc_dmadata *)match->data;
 860	sdma->exec_desc = data->exec;
 861	sdma->type = data->type;
 862
 863	if (of_property_read_u32(dn, "cell-index", &id)) {
 864		dev_err(dev, "Failed to get DMAC index\n");
 865		return -ENODEV;
 866	}
 867
 868	sdma->irq = irq_of_parse_and_map(dn, 0);
 869	if (!sdma->irq) {
 870		dev_err(dev, "Error mapping IRQ!\n");
 871		return -EINVAL;
 872	}
 873
 874	sdma->clk = devm_clk_get(dev, NULL);
 875	if (IS_ERR(sdma->clk)) {
 876		dev_err(dev, "failed to get a clock.\n");
 877		return PTR_ERR(sdma->clk);
 878	}
 879
 880	ret = of_address_to_resource(dn, 0, &res);
 881	if (ret) {
 882		dev_err(dev, "Error parsing memory region!\n");
 883		goto irq_dispose;
 884	}
 885
 886	regs_start = res.start;
 887	regs_size = resource_size(&res);
 888
 889	sdma->base = devm_ioremap(dev, regs_start, regs_size);
 890	if (!sdma->base) {
 891		dev_err(dev, "Error mapping memory region!\n");
 892		ret = -ENOMEM;
 893		goto irq_dispose;
 894	}
 895
 896	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
 897	if (ret) {
 898		dev_err(dev, "Error requesting IRQ!\n");
 899		ret = -EINVAL;
 900		goto irq_dispose;
 901	}
 902
 903	dma = &sdma->dma;
 904	dma->dev = dev;
 905
 906	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
 907	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
 908	dma->device_issue_pending = sirfsoc_dma_issue_pending;
 909	dma->device_config = sirfsoc_dma_slave_config;
 910	dma->device_pause = sirfsoc_dma_pause_chan;
 911	dma->device_resume = sirfsoc_dma_resume_chan;
 912	dma->device_terminate_all = sirfsoc_dma_terminate_all;
 913	dma->device_tx_status = sirfsoc_dma_tx_status;
 914	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
 915	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
 916	dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
 917	dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
 918	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 919
 920	INIT_LIST_HEAD(&dma->channels);
 921	dma_cap_set(DMA_SLAVE, dma->cap_mask);
 922	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
 923	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
 924	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
 925
 926	for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
 927		schan = &sdma->channels[i];
 928
 929		schan->chan.device = dma;
 930		dma_cookie_init(&schan->chan);
 931
 932		INIT_LIST_HEAD(&schan->free);
 933		INIT_LIST_HEAD(&schan->prepared);
 934		INIT_LIST_HEAD(&schan->queued);
 935		INIT_LIST_HEAD(&schan->active);
 936		INIT_LIST_HEAD(&schan->completed);
 937
 938		spin_lock_init(&schan->lock);
 939		list_add_tail(&schan->chan.device_node, &dma->channels);
 940	}
 941
 942	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
 943
 944	/* Register DMA engine */
 945	dev_set_drvdata(dev, sdma);
 946
 947	ret = dma_async_device_register(dma);
 948	if (ret)
 949		goto free_irq;
 950
 951	/* Device-tree DMA controller registration */
 952	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
 953	if (ret) {
 954		dev_err(dev, "failed to register DMA controller\n");
 955		goto unreg_dma_dev;
 956	}
 957
 958	pm_runtime_enable(&op->dev);
 959	dev_info(dev, "initialized SIRFSOC DMAC driver\n");
 960
 961	return 0;
 962
 963unreg_dma_dev:
 964	dma_async_device_unregister(dma);
 965free_irq:
 966	free_irq(sdma->irq, sdma);
 967irq_dispose:
 968	irq_dispose_mapping(sdma->irq);
 969	return ret;
 970}
 971
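/*
 * Teardown mirrors probe() in reverse: unpublish the DT translation,
 * unregister the engine, then release the IRQ and its mapping.  The
 * final clock gating only happens here if runtime PM has not already
 * suspended the device.
 */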
 972static int sirfsoc_dma_remove(struct platform_device *op)
 973{
 974	struct device *dev = &op->dev;
 975	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 976
 977	of_dma_controller_free(op->dev.of_node);
 978	dma_async_device_unregister(&sdma->dma);
 979	free_irq(sdma->irq, sdma);
 980	tasklet_kill(&sdma->tasklet);
 981	irq_dispose_mapping(sdma->irq);
 982	pm_runtime_disable(&op->dev);
 983	if (!pm_runtime_status_suspended(&op->dev))
 984		sirfsoc_dma_runtime_suspend(&op->dev);
 985
 986	return 0;
 987}
 988
 989static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
 990{
 991	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 992
 993	clk_disable_unprepare(sdma->clk);
 994	return 0;
 995}
 996
 997static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
 998{
 999	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
1000	int ret;
1001
1002	ret = clk_prepare_enable(sdma->clk);
1003	if (ret < 0) {
1004		dev_err(dev, "clk_enable failed: %d\n", ret);
1005		return ret;
1006	}
1007	return 0;
1008}
1009
1010static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
1011{
1012	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
1013	struct sirfsoc_dma_regs *save = &sdma->regs_save;
1014	struct sirfsoc_dma_chan *schan;
1015	int ch;
1016	int ret;
1017	int count;
1018	u32 int_offset;
1019
1020	/*
1021	 * If we were runtime-suspended before, resume to enable the
1022	 * clock before accessing the registers.
1023	 */
1024	if (pm_runtime_status_suspended(dev)) {
1025		ret = sirfsoc_dma_runtime_resume(dev);
1026		if (ret < 0)
1027			return ret;
1028	}
1029
1030	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
1031		count = 1;
1032		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
1033	} else {
1034		count = SIRFSOC_DMA_CHANNELS;
1035		int_offset = SIRFSOC_DMA_INT_EN;
1036	}
1037
1038	/*
1039	 * The DMA controller loses all register contents while suspended,
1040	 * so save the registers of every channel that is still active.
1041	 */
1042	for (ch = 0; ch < count; ch++) {
1043		schan = &sdma->channels[ch];
1044		if (list_empty(&schan->active))
1045			continue;
1046		save->ctrl[ch] = readl_relaxed(sdma->base +
1047			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
1048	}
1049	save->interrupt_en = readl_relaxed(sdma->base + int_offset);
1050
1051	/* Disable clock */
1052	sirfsoc_dma_runtime_suspend(dev);
1053
1054	return 0;
1055}
1056
1057static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
1058{
1059	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
1060	struct sirfsoc_dma_regs *save = &sdma->regs_save;
1061	struct sirfsoc_dma_desc *sdesc;
1062	struct sirfsoc_dma_chan *schan;
1063	int ch;
1064	int ret;
1065	int count;
1066	u32 int_offset;
1067	u32 width_offset;
1068
1069	/* Enable the clock before accessing the registers */
1070	ret = sirfsoc_dma_runtime_resume(dev);
1071	if (ret < 0)
1072		return ret;
1073
1074	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
1075		count = 1;
1076		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
1077		width_offset = SIRFSOC_DMA_WIDTH_ATLAS7;
1078	} else {
1079		count = SIRFSOC_DMA_CHANNELS;
1080		int_offset = SIRFSOC_DMA_INT_EN;
1081		width_offset = SIRFSOC_DMA_WIDTH_0;
1082	}
1083
1084	writel_relaxed(save->interrupt_en, sdma->base + int_offset);
1085	for (ch = 0; ch < count; ch++) {
1086		schan = &sdma->channels[ch];
1087		if (list_empty(&schan->active))
1088			continue;
1089		sdesc = list_first_entry(&schan->active,
1090			struct sirfsoc_dma_desc,
1091			node);
1092		writel_relaxed(sdesc->width,
1093			sdma->base + width_offset + ch * 4);
1094		writel_relaxed(sdesc->xlen,
1095			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
1096		writel_relaxed(sdesc->ylen,
1097			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
1098		writel_relaxed(save->ctrl[ch],
1099			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
1100		if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
1101			writel_relaxed(sdesc->addr,
1102				sdma->base + SIRFSOC_DMA_CH_ADDR);
1103		} else {
1104			writel_relaxed(sdesc->addr >> 2,
1105				sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
1106		}
1108	}
1109
1110	/* If we were runtime-suspended before, suspend again */
1111	if (pm_runtime_status_suspended(dev))
1112		sirfsoc_dma_runtime_suspend(dev);
1113
1114	return 0;
1115}
1116
1117static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
1118	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
1119	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
1120};
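
/*
 * Runtime PM only gates the controller clock; system sleep additionally
 * saves and restores the per-channel registers via the callbacks above,
 * since the block loses its register state while powered down.
 */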
1121
1122static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
1123	.exec = sirfsoc_dma_execute_hw_a6,
1124	.type = SIRFSOC_DMA_VER_A6,
1125};
1126
1127static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
1128	.exec = sirfsoc_dma_execute_hw_a7v1,
1129	.type = SIRFSOC_DMA_VER_A7V1,
1130};
1131
1132static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
1133	.exec = sirfsoc_dma_execute_hw_a7v2,
1134	.type = SIRFSOC_DMA_VER_A7V2,
1135};
1136
1137static const struct of_device_id sirfsoc_dma_match[] = {
1138	{ .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,},
1139	{ .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,},
1140	{ .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,},
1141	{},
1142};
1143MODULE_DEVICE_TABLE(of, sirfsoc_dma_match);
1144
1145static struct platform_driver sirfsoc_dma_driver = {
1146	.probe		= sirfsoc_dma_probe,
1147	.remove		= sirfsoc_dma_remove,
1148	.driver = {
1149		.name = DRV_NAME,
1150		.pm = &sirfsoc_dma_pm_ops,
1151		.of_match_table	= sirfsoc_dma_match,
1152	},
1153};
1154
1155static __init int sirfsoc_dma_init(void)
1156{
1157	return platform_driver_register(&sirfsoc_dma_driver);
1158}
1159
1160static void __exit sirfsoc_dma_exit(void)
1161{
1162	platform_driver_unregister(&sirfsoc_dma_driver);
1163}
1164
1165subsys_initcall(sirfsoc_dma_init);
1166module_exit(sirfsoc_dma_exit);
1167
1168MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
1169MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
1170MODULE_DESCRIPTION("SIRFSOC DMA control driver");
1171MODULE_LICENSE("GPL v2");