// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-dma.c
//
// This file contains a driver for the Freescale i.MX DMA engine
// found on i.MX1/21/27
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/dma/imx-dma.h>

#include "dmaengine.h"

#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS  16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST     (1 << 0)
#define IMX_DMA_ERR_REQUEST   (1 << 1)
#define IMX_DMA_ERR_TRANSFER  (1 << 2)
#define IMX_DMA_ERR_BUFFER    (1 << 3)
#define IMX_DMA_ERR_TIMEOUT   (1 << 4)

#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt Status Register */
#define DMA_DIMR    0x08		/* Interrupt Mask Register */
#define DMA_DBTOSR  0x0c		/* Burst Timeout Status Register */
#define DMA_DRTOSR  0x10		/* Request Timeout Status Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer Overflow Status Register */
#define DMA_DBTOCR  0x1c		/* Burst Timeout Control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
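/* Per-channel registers: channel n sits at a 64-byte stride, i.e. (n) << 6 */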
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request Source Select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst Length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request Timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel Counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
	struct dma_slave_config		config;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			 request;
};

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
	}, {
		.compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA,
	}, {
		.compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, size 0x%08x\n",
		__func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}

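/*
 * Ack and unmask the channel's interrupt, then set CCR_CEN (plus CCR_ACRPT so
 * a repeat can be armed while the channel runs). With hardware chaining, the
 * next scatterlist entry is pre-programmed and CCR_RPT requests the repeat.
 */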
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

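/*
 * Stop the channel: mask its interrupt, clear CCR_CEN and ack any pending
 * interrupt status. The chaining watchdog is cancelled first.
 */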
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

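/*
 * Watchdog for hardware-chained transfers: if the expected interrupt never
 * arrives, reset the channel's CCR and let the tasklet handle the error.
 */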
static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

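/*
 * Error interrupt: gather the burst-timeout, request-timeout, transfer-error
 * and buffer-overflow status registers, ack the offending bits per channel
 * and defer the rest of the error handling to the channel tasklet.
 */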
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

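/*
 * Per-channel interrupt work: advance to the next scatterlist entry if there
 * is one (restarting or re-arming the channel depending on whether hardware
 * chaining is in use), otherwise stop the channel and let the tasklet
 * complete the descriptor.
 */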
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

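/*
 * Program the hardware for a descriptor and enable the channel. Interleaved
 * transfers additionally need one of the two shared 2D configuration slots;
 * -EBUSY is returned when both slots are taken with a different geometry.
 */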
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall through here intentionally, since a 2D transfer is
		 * similar to MEMCPY, just adding the 2D slot configuration.
		 */
		fallthrough;
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

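/*
 * Completion tasklet: complete the cookie of the finished descriptor (unless
 * it is cyclic), release its 2D slot if needed, recycle it to ld_free and
 * start the next queued descriptor, then invoke the client callback.
 */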
static void imxdma_tasklet(struct tasklet_struct *t)
{
	struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in the non-cyclic case is it marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}

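/*
 * Translate a dma_slave_config into hardware terms for one direction: cache
 * the peripheral address, derive the FIFO/linear CCR values for both
 * directions, select the request source and program the burst length as
 * maxburst * bus width.
 */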
static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

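/*
 * tx_submit: move the prepared descriptor from ld_free to ld_queue under the
 * engine lock and assign its cookie; the transfer starts in issue_pending.
 */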
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	imxdma_config_write(chan, &imxdmac->config, direction);

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

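/*
 * Cyclic transfers are emulated with a scatterlist of one entry per period
 * that is chained back onto itself, so imxdma_sg_next() wraps around forever
 * (desc->len is set to IMX_DMA_LENGTH_LOOP and is never decremented).
 */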
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long)xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

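/*
 * issue_pending: if the channel is idle, program and start the first queued
 * descriptor and move it to ld_active.
 */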
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);

	imxdma->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev,
					 "Can't register IRQ %d for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
				imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static void imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.remove_new	= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");
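
/*
 * Usage sketch (not part of this driver, kept under #if 0): a minimal
 * dmaengine client showing how a peripheral driver might run a cyclic
 * DEV_TO_MEM transfer through this controller. The function name, the "rx"
 * channel label, the bus width and the burst size are illustrative
 * assumptions, not taken from this file; only the dmaengine calls are real.
 */
#if 0	/* example only, never compiled */
static int example_start_cyclic_rx(struct device *dev, dma_addr_t fifo_addr,
				   dma_addr_t buf, size_t buf_len,
				   size_t period_len)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst	= 4,
	};
	int ret;

	/* "rx" must match a dma-names entry in the client's devicetree node */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Cached by imxdma_config(); applied per direction at prep time */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	/* Ends up in imxdma_prep_dma_cyclic() above */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -ENOMEM;
		goto release;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}
#endif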
v5.4
   1// SPDX-License-Identifier: GPL-2.0+
   2//
   3// drivers/dma/imx-dma.c
   4//
   5// This file contains a driver for the Freescale i.MX DMA engine
   6// found on i.MX1/21/27
   7//
   8// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
   9// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
  10
  11#include <linux/err.h>
  12#include <linux/init.h>
  13#include <linux/types.h>
  14#include <linux/mm.h>
  15#include <linux/interrupt.h>
  16#include <linux/spinlock.h>
  17#include <linux/device.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/slab.h>
  20#include <linux/platform_device.h>
  21#include <linux/clk.h>
  22#include <linux/dmaengine.h>
  23#include <linux/module.h>
  24#include <linux/of_device.h>
  25#include <linux/of_dma.h>
  26
  27#include <asm/irq.h>
  28#include <linux/platform_data/dma-imx.h>
  29
  30#include "dmaengine.h"
  31#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
  32#define IMX_DMA_CHANNELS  16
  33
  34#define IMX_DMA_2D_SLOTS	2
  35#define IMX_DMA_2D_SLOT_A	0
  36#define IMX_DMA_2D_SLOT_B	1
  37
  38#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
  39#define IMX_DMA_MEMSIZE_32	(0 << 4)
  40#define IMX_DMA_MEMSIZE_8	(1 << 4)
  41#define IMX_DMA_MEMSIZE_16	(2 << 4)
  42#define IMX_DMA_TYPE_LINEAR	(0 << 10)
  43#define IMX_DMA_TYPE_2D		(1 << 10)
  44#define IMX_DMA_TYPE_FIFO	(2 << 10)
  45
  46#define IMX_DMA_ERR_BURST     (1 << 0)
  47#define IMX_DMA_ERR_REQUEST   (1 << 1)
  48#define IMX_DMA_ERR_TRANSFER  (1 << 2)
  49#define IMX_DMA_ERR_BUFFER    (1 << 3)
  50#define IMX_DMA_ERR_TIMEOUT   (1 << 4)
  51
  52#define DMA_DCR     0x00		/* Control Register */
  53#define DMA_DISR    0x04		/* Interrupt status Register */
  54#define DMA_DIMR    0x08		/* Interrupt mask Register */
  55#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
  56#define DMA_DRTOSR  0x10		/* Request timeout Register */
  57#define DMA_DSESR   0x14		/* Transfer Error Status Register */
  58#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
  59#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
  60#define DMA_WSRA    0x40		/* W-Size Register A */
  61#define DMA_XSRA    0x44		/* X-Size Register A */
  62#define DMA_YSRA    0x48		/* Y-Size Register A */
  63#define DMA_WSRB    0x4c		/* W-Size Register B */
  64#define DMA_XSRB    0x50		/* X-Size Register B */
  65#define DMA_YSRB    0x54		/* Y-Size Register B */
  66#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
  67#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
  68#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
  69#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
  70#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
  71#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
  72#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
  73#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
  74#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */
  75
  76#define DCR_DRST           (1<<1)
  77#define DCR_DEN            (1<<0)
  78#define DBTOCR_EN          (1<<15)
  79#define DBTOCR_CNT(x)      ((x) & 0x7fff)
  80#define CNTR_CNT(x)        ((x) & 0xffffff)
  81#define CCR_ACRPT          (1<<14)
  82#define CCR_DMOD_LINEAR    (0x0 << 12)
  83#define CCR_DMOD_2D        (0x1 << 12)
  84#define CCR_DMOD_FIFO      (0x2 << 12)
  85#define CCR_DMOD_EOBFIFO   (0x3 << 12)
  86#define CCR_SMOD_LINEAR    (0x0 << 10)
  87#define CCR_SMOD_2D        (0x1 << 10)
  88#define CCR_SMOD_FIFO      (0x2 << 10)
  89#define CCR_SMOD_EOBFIFO   (0x3 << 10)
  90#define CCR_MDIR_DEC       (1<<9)
  91#define CCR_MSEL_B         (1<<8)
  92#define CCR_DSIZ_32        (0x0 << 6)
  93#define CCR_DSIZ_8         (0x1 << 6)
  94#define CCR_DSIZ_16        (0x2 << 6)
  95#define CCR_SSIZ_32        (0x0 << 4)
  96#define CCR_SSIZ_8         (0x1 << 4)
  97#define CCR_SSIZ_16        (0x2 << 4)
  98#define CCR_REN            (1<<3)
  99#define CCR_RPT            (1<<2)
 100#define CCR_FRC            (1<<1)
 101#define CCR_CEN            (1<<0)
 102#define RTOR_EN            (1<<15)
 103#define RTOR_CLK           (1<<14)
 104#define RTOR_PSC           (1<<13)
 105
 106enum  imxdma_prep_type {
 107	IMXDMA_DESC_MEMCPY,
 108	IMXDMA_DESC_INTERLEAVED,
 109	IMXDMA_DESC_SLAVE_SG,
 110	IMXDMA_DESC_CYCLIC,
 111};
 112
 113struct imx_dma_2d_config {
 114	u16		xsr;
 115	u16		ysr;
 116	u16		wsr;
 117	int		count;
 118};
 119
 120struct imxdma_desc {
 121	struct list_head		node;
 122	struct dma_async_tx_descriptor	desc;
 123	enum dma_status			status;
 124	dma_addr_t			src;
 125	dma_addr_t			dest;
 126	size_t				len;
 127	enum dma_transfer_direction	direction;
 128	enum imxdma_prep_type		type;
 129	/* For memcpy and interleaved */
 130	unsigned int			config_port;
 131	unsigned int			config_mem;
 132	/* For interleaved transfers */
 133	unsigned int			x;
 134	unsigned int			y;
 135	unsigned int			w;
 136	/* For slave sg and cyclic */
 137	struct scatterlist		*sg;
 138	unsigned int			sgcount;
 139};
 140
 141struct imxdma_channel {
 142	int				hw_chaining;
 143	struct timer_list		watchdog;
 144	struct imxdma_engine		*imxdma;
 145	unsigned int			channel;
 146
 147	struct tasklet_struct		dma_tasklet;
 148	struct list_head		ld_free;
 149	struct list_head		ld_queue;
 150	struct list_head		ld_active;
 151	int				descs_allocated;
 152	enum dma_slave_buswidth		word_size;
 153	dma_addr_t			per_address;
 154	u32				watermark_level;
 155	struct dma_chan			chan;
 156	struct dma_async_tx_descriptor	desc;
 157	enum dma_status			status;
 158	int				dma_request;
 159	struct scatterlist		*sg_list;
 160	u32				ccr_from_device;
 161	u32				ccr_to_device;
 162	bool				enabled_2d;
 163	int				slot_2d;
 164	unsigned int			irq;
 165	struct dma_slave_config		config;
 166};
 167
 168enum imx_dma_type {
 169	IMX1_DMA,
 170	IMX21_DMA,
 171	IMX27_DMA,
 172};
 173
 174struct imxdma_engine {
 175	struct device			*dev;
 176	struct device_dma_parameters	dma_parms;
 177	struct dma_device		dma_device;
 178	void __iomem			*base;
 179	struct clk			*dma_ahb;
 180	struct clk			*dma_ipg;
 181	spinlock_t			lock;
 182	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
 183	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
 184	enum imx_dma_type		devtype;
 185	unsigned int			irq;
 186	unsigned int			irq_err;
 187
 188};
 189
 190struct imxdma_filter_data {
 191	struct imxdma_engine	*imxdma;
 192	int			 request;
 193};
 194
 195static const struct platform_device_id imx_dma_devtype[] = {
 196	{
 197		.name = "imx1-dma",
 198		.driver_data = IMX1_DMA,
 199	}, {
 200		.name = "imx21-dma",
 201		.driver_data = IMX21_DMA,
 202	}, {
 203		.name = "imx27-dma",
 204		.driver_data = IMX27_DMA,
 205	}, {
 206		/* sentinel */
 207	}
 208};
 209MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
 210
 211static const struct of_device_id imx_dma_of_dev_id[] = {
 212	{
 213		.compatible = "fsl,imx1-dma",
 214		.data = &imx_dma_devtype[IMX1_DMA],
 215	}, {
 216		.compatible = "fsl,imx21-dma",
 217		.data = &imx_dma_devtype[IMX21_DMA],
 218	}, {
 219		.compatible = "fsl,imx27-dma",
 220		.data = &imx_dma_devtype[IMX27_DMA],
 221	}, {
 222		/* sentinel */
 223	}
 224};
 225MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
 226
 227static inline int is_imx1_dma(struct imxdma_engine *imxdma)
 228{
 229	return imxdma->devtype == IMX1_DMA;
 230}
 231
 232static inline int is_imx27_dma(struct imxdma_engine *imxdma)
 233{
 234	return imxdma->devtype == IMX27_DMA;
 235}
 236
 237static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
 238{
 239	return container_of(chan, struct imxdma_channel, chan);
 240}
 241
 242static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
 243{
 244	struct imxdma_desc *desc;
 245
 246	if (!list_empty(&imxdmac->ld_active)) {
 247		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
 248					node);
 249		if (desc->type == IMXDMA_DESC_CYCLIC)
 250			return true;
 251	}
 252	return false;
 253}
 254
 255
 256
 257static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
 258			     unsigned offset)
 259{
 260	__raw_writel(val, imxdma->base + offset);
 261}
 262
 263static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
 264{
 265	return __raw_readl(imxdma->base + offset);
 266}
 267
 268static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
 269{
 270	struct imxdma_engine *imxdma = imxdmac->imxdma;
 271
 272	if (is_imx27_dma(imxdma))
 273		return imxdmac->hw_chaining;
 274	else
 275		return 0;
 276}
 277
 278/*
 279 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 280 */
 281static inline void imxdma_sg_next(struct imxdma_desc *d)
 282{
 283	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
 284	struct imxdma_engine *imxdma = imxdmac->imxdma;
 285	struct scatterlist *sg = d->sg;
 286	size_t now;
 287
 288	now = min_t(size_t, d->len, sg_dma_len(sg));
 289	if (d->len != IMX_DMA_LENGTH_LOOP)
 290		d->len -= now;
 291
 292	if (d->direction == DMA_DEV_TO_MEM)
 293		imx_dmav1_writel(imxdma, sg->dma_address,
 294				 DMA_DAR(imxdmac->channel));
 295	else
 296		imx_dmav1_writel(imxdma, sg->dma_address,
 297				 DMA_SAR(imxdmac->channel));
 298
 299	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));
 300
 301	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
 302		"size 0x%08x\n", __func__, imxdmac->channel,
 303		 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
 304		 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
 305		 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
 306}
 307
 308static void imxdma_enable_hw(struct imxdma_desc *d)
 309{
 310	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
 311	struct imxdma_engine *imxdma = imxdmac->imxdma;
 312	int channel = imxdmac->channel;
 313	unsigned long flags;
 314
 315	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
 316
 317	local_irq_save(flags);
 318
 319	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
 320	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
 321			 ~(1 << channel), DMA_DIMR);
 322	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
 323			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));
 324
 325	if (!is_imx1_dma(imxdma) &&
 326			d->sg && imxdma_hw_chain(imxdmac)) {
 327		d->sg = sg_next(d->sg);
 328		if (d->sg) {
 329			u32 tmp;
 330			imxdma_sg_next(d);
 331			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
 332			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
 333					 DMA_CCR(channel));
 334		}
 335	}
 336
 337	local_irq_restore(flags);
 338}
 339
 340static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
 341{
 342	struct imxdma_engine *imxdma = imxdmac->imxdma;
 343	int channel = imxdmac->channel;
 344	unsigned long flags;
 345
 346	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
 347
 348	if (imxdma_hw_chain(imxdmac))
 349		del_timer(&imxdmac->watchdog);
 350
 351	local_irq_save(flags);
 352	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
 353			 (1 << channel), DMA_DIMR);
 354	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
 355			 ~CCR_CEN, DMA_CCR(channel));
 356	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
 357	local_irq_restore(flags);
 358}
 359
 360static void imxdma_watchdog(struct timer_list *t)
 361{
 362	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
 363	struct imxdma_engine *imxdma = imxdmac->imxdma;
 364	int channel = imxdmac->channel;
 365
 366	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));
 367
 368	/* Tasklet watchdog error handler */
 369	tasklet_schedule(&imxdmac->dma_tasklet);
 370	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
 371		imxdmac->channel);
 372}
 373
 374static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
 375{
 376	struct imxdma_engine *imxdma = dev_id;
 377	unsigned int err_mask;
 378	int i, disr;
 379	int errcode;
 380
 381	disr = imx_dmav1_readl(imxdma, DMA_DISR);
 382
 383	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
 384		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
 385		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
 386		   imx_dmav1_readl(imxdma, DMA_DBOSR);
 387
 388	if (!err_mask)
 389		return IRQ_HANDLED;
 390
 391	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);
 392
 393	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
 394		if (!(err_mask & (1 << i)))
 395			continue;
 396		errcode = 0;
 397
 398		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
 399			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
 400			errcode |= IMX_DMA_ERR_BURST;
 401		}
 402		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
 403			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
 404			errcode |= IMX_DMA_ERR_REQUEST;
 405		}
 406		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
 407			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
 408			errcode |= IMX_DMA_ERR_TRANSFER;
 409		}
 410		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
 411			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
 412			errcode |= IMX_DMA_ERR_BUFFER;
 413		}
 414		/* Tasklet error handler */
 415		tasklet_schedule(&imxdma->channel[i].dma_tasklet);
 416
 417		dev_warn(imxdma->dev,
 418			 "DMA timeout on channel %d -%s%s%s%s\n", i,
 419			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
 420			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
 421			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
 422			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
 423	}
 424	return IRQ_HANDLED;
 425}
 426
 427static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
 428{
 429	struct imxdma_engine *imxdma = imxdmac->imxdma;
 430	int chno = imxdmac->channel;
 431	struct imxdma_desc *desc;
 432	unsigned long flags;
 433
 434	spin_lock_irqsave(&imxdma->lock, flags);
 435	if (list_empty(&imxdmac->ld_active)) {
 436		spin_unlock_irqrestore(&imxdma->lock, flags);
 437		goto out;
 438	}
 439
 440	desc = list_first_entry(&imxdmac->ld_active,
 441				struct imxdma_desc,
 442				node);
 443	spin_unlock_irqrestore(&imxdma->lock, flags);
 444
 445	if (desc->sg) {
 446		u32 tmp;
 447		desc->sg = sg_next(desc->sg);
 448
 449		if (desc->sg) {
 450			imxdma_sg_next(desc);
 451
 452			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));
 453
 454			if (imxdma_hw_chain(imxdmac)) {
 455				/* FIXME: The timeout should probably be
 456				 * configurable
 457				 */
 458				mod_timer(&imxdmac->watchdog,
 459					jiffies + msecs_to_jiffies(500));
 460
 461				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
 462				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
 463			} else {
 464				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
 465						 DMA_CCR(chno));
 466				tmp |= CCR_CEN;
 467			}
 468
 469			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
 470
 471			if (imxdma_chan_is_doing_cyclic(imxdmac))
 472				/* Tasklet progression */
 473				tasklet_schedule(&imxdmac->dma_tasklet);
 474
 475			return;
 476		}
 477
 478		if (imxdma_hw_chain(imxdmac)) {
 479			del_timer(&imxdmac->watchdog);
 480			return;
 481		}
 482	}
 483
 484out:
 485	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
 486	/* Tasklet irq */
 487	tasklet_schedule(&imxdmac->dma_tasklet);
 488}
 489
 490static irqreturn_t dma_irq_handler(int irq, void *dev_id)
 491{
 492	struct imxdma_engine *imxdma = dev_id;
 493	int i, disr;
 494
 495	if (!is_imx1_dma(imxdma))
 496		imxdma_err_handler(irq, dev_id);
 497
 498	disr = imx_dmav1_readl(imxdma, DMA_DISR);
 499
 500	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);
 501
 502	imx_dmav1_writel(imxdma, disr, DMA_DISR);
 503	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
 504		if (disr & (1 << i))
 505			dma_irq_handle_channel(&imxdma->channel[i]);
 506	}
 507
 508	return IRQ_HANDLED;
 509}
 510
 511static int imxdma_xfer_desc(struct imxdma_desc *d)
 512{
 513	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
 514	struct imxdma_engine *imxdma = imxdmac->imxdma;
 515	int slot = -1;
 516	int i;
 517
 518	/* Configure and enable */
 519	switch (d->type) {
 520	case IMXDMA_DESC_INTERLEAVED:
 521		/* Try to get a free 2D slot */
 522		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
 523			if ((imxdma->slots_2d[i].count > 0) &&
 524			((imxdma->slots_2d[i].xsr != d->x) ||
 525			(imxdma->slots_2d[i].ysr != d->y) ||
 526			(imxdma->slots_2d[i].wsr != d->w)))
 527				continue;
 528			slot = i;
 529			break;
 530		}
 531		if (slot < 0)
 532			return -EBUSY;
 533
 534		imxdma->slots_2d[slot].xsr = d->x;
 535		imxdma->slots_2d[slot].ysr = d->y;
 536		imxdma->slots_2d[slot].wsr = d->w;
 537		imxdma->slots_2d[slot].count++;
 538
 539		imxdmac->slot_2d = slot;
 540		imxdmac->enabled_2d = true;
 541
 542		if (slot == IMX_DMA_2D_SLOT_A) {
 543			d->config_mem &= ~CCR_MSEL_B;
 544			d->config_port &= ~CCR_MSEL_B;
 545			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
 546			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
 547			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
 548		} else {
 549			d->config_mem |= CCR_MSEL_B;
 550			d->config_port |= CCR_MSEL_B;
 551			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
 552			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
 553			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
 554		}
  555		/*
  556		 * We fall through here intentionally, since a 2D transfer is
  557		 * just a MEMCPY with the 2D slot configuration added.
  558		 */
  559		fallthrough;
 560	case IMXDMA_DESC_MEMCPY:
 561		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
 562		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
 563		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
 564			 DMA_CCR(imxdmac->channel));
 565
 566		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
 567
 568		dev_dbg(imxdma->dev,
 569			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
 570			__func__, imxdmac->channel,
 571			(unsigned long long)d->dest,
 572			(unsigned long long)d->src, d->len);
 573
 574		break;
 575	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
 576	case IMXDMA_DESC_CYCLIC:
 577	case IMXDMA_DESC_SLAVE_SG:
 578		if (d->direction == DMA_DEV_TO_MEM) {
 579			imx_dmav1_writel(imxdma, imxdmac->per_address,
 580					 DMA_SAR(imxdmac->channel));
 581			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
 582					 DMA_CCR(imxdmac->channel));
 583
 584			dev_dbg(imxdma->dev,
 585				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
 586				__func__, imxdmac->channel,
 587				d->sg, d->sgcount, d->len,
 588				(unsigned long long)imxdmac->per_address);
 589		} else if (d->direction == DMA_MEM_TO_DEV) {
 590			imx_dmav1_writel(imxdma, imxdmac->per_address,
 591					 DMA_DAR(imxdmac->channel));
 592			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
 593					 DMA_CCR(imxdmac->channel));
 594
 595			dev_dbg(imxdma->dev,
 596				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
 597				__func__, imxdmac->channel,
 598				d->sg, d->sgcount, d->len,
 599				(unsigned long long)imxdmac->per_address);
 600		} else {
 601			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
 602				__func__, imxdmac->channel);
 603			return -EINVAL;
 604		}
 605
 606		imxdma_sg_next(d);
 607
 608		break;
 609	default:
 610		return -EINVAL;
 611	}
 612	imxdma_enable_hw(d);
 613	return 0;
 614}
 615
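     /*
      * Completion tasklet: marks the finished descriptor complete (cyclic
      * descriptors stay on ld_active), releases any 2D slot, starts the
      * next queued descriptor and invokes the client callback.
      */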
  616static void imxdma_tasklet(struct tasklet_struct *t)
  617{
  618	struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
 619	struct imxdma_engine *imxdma = imxdmac->imxdma;
 620	struct imxdma_desc *desc, *next_desc;
 621	unsigned long flags;
 622
 623	spin_lock_irqsave(&imxdma->lock, flags);
 624
 625	if (list_empty(&imxdmac->ld_active)) {
 626		/* Someone might have called terminate all */
 627		spin_unlock_irqrestore(&imxdma->lock, flags);
 628		return;
 629	}
 630	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
 631
  632	/* If we are dealing with a cyclic descriptor, keep it on ld_active
  633	 * and don't mark the descriptor as complete.
  634	 * Only in the non-cyclic case is it marked as complete.
  635	 */
 636	if (imxdma_chan_is_doing_cyclic(imxdmac))
 637		goto out;
 638	else
 639		dma_cookie_complete(&desc->desc);
 640
 641	/* Free 2D slot if it was an interleaved transfer */
 642	if (imxdmac->enabled_2d) {
 643		imxdma->slots_2d[imxdmac->slot_2d].count--;
 644		imxdmac->enabled_2d = false;
 645	}
 646
 647	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
 648
 649	if (!list_empty(&imxdmac->ld_queue)) {
 650		next_desc = list_first_entry(&imxdmac->ld_queue,
 651					     struct imxdma_desc, node);
 652		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
 653		if (imxdma_xfer_desc(next_desc) < 0)
 654			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
 655				 __func__, imxdmac->channel);
 656	}
 657out:
 658	spin_unlock_irqrestore(&imxdma->lock, flags);
 659
 660	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
 661}
 662
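     /*
      * Stop the channel and move every active and queued descriptor back
      * to the free list.
      */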
 663static int imxdma_terminate_all(struct dma_chan *chan)
 664{
 665	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 666	struct imxdma_engine *imxdma = imxdmac->imxdma;
 667	unsigned long flags;
 668
 669	imxdma_disable_hw(imxdmac);
 670
 671	spin_lock_irqsave(&imxdma->lock, flags);
 672	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
 673	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
 674	spin_unlock_irqrestore(&imxdma->lock, flags);
 675	return 0;
 676}
 677
 678static int imxdma_config_write(struct dma_chan *chan,
 679			       struct dma_slave_config *dmaengine_cfg,
 680			       enum dma_transfer_direction direction)
 681{
 682	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 683	struct imxdma_engine *imxdma = imxdmac->imxdma;
 684	unsigned int mode = 0;
 685
 686	if (direction == DMA_DEV_TO_MEM) {
 687		imxdmac->per_address = dmaengine_cfg->src_addr;
 688		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
 689		imxdmac->word_size = dmaengine_cfg->src_addr_width;
 690	} else {
 691		imxdmac->per_address = dmaengine_cfg->dst_addr;
 692		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
 693		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
 694	}
 695
 696	switch (imxdmac->word_size) {
 697	case DMA_SLAVE_BUSWIDTH_1_BYTE:
 698		mode = IMX_DMA_MEMSIZE_8;
 699		break;
 700	case DMA_SLAVE_BUSWIDTH_2_BYTES:
 701		mode = IMX_DMA_MEMSIZE_16;
 702		break;
 703	default:
 704	case DMA_SLAVE_BUSWIDTH_4_BYTES:
 705		mode = IMX_DMA_MEMSIZE_32;
 706		break;
 707	}
 708
 709	imxdmac->hw_chaining = 0;
 710
 711	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
 712		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
 713		CCR_REN;
 714	imxdmac->ccr_to_device =
 715		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
 716		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
 717	imx_dmav1_writel(imxdma, imxdmac->dma_request,
 718			 DMA_RSSR(imxdmac->channel));
 719
 720	/* Set burst length */
 721	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
 722			 imxdmac->word_size, DMA_BLR(imxdmac->channel));
 723
 724	return 0;
 725}
 726
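     /*
      * device_config only caches the slave configuration here; it is
      * written to the hardware by imxdma_config_write() once the transfer
      * direction is known (see imxdma_prep_dma_cyclic()).
      */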
 727static int imxdma_config(struct dma_chan *chan,
 728			 struct dma_slave_config *dmaengine_cfg)
 729{
 730	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 731
 732	memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));
 733
 734	return 0;
 735}
 736
 737static enum dma_status imxdma_tx_status(struct dma_chan *chan,
 738					    dma_cookie_t cookie,
 739					    struct dma_tx_state *txstate)
 740{
 741	return dma_cookie_status(chan, cookie, txstate);
 742}
 743
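     /* Move the descriptor from ld_free to ld_queue and assign its cookie. */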
 744static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
 745{
 746	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
 747	struct imxdma_engine *imxdma = imxdmac->imxdma;
 748	dma_cookie_t cookie;
 749	unsigned long flags;
 750
 751	spin_lock_irqsave(&imxdma->lock, flags);
 752	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
 753	cookie = dma_cookie_assign(tx);
 754	spin_unlock_irqrestore(&imxdma->lock, flags);
 755
 756	return cookie;
 757}
 758
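     /*
      * Pre-allocate up to IMXDMA_MAX_CHAN_DESCRIPTORS software descriptors
      * on the channel's free list; the request line is taken from
      * chan->private when the client passes one.
      */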
 759static int imxdma_alloc_chan_resources(struct dma_chan *chan)
 760{
 761	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 762	struct imx_dma_data *data = chan->private;
 763
 764	if (data != NULL)
 765		imxdmac->dma_request = data->dma_request;
 766
 767	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
 768		struct imxdma_desc *desc;
 769
 770		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 771		if (!desc)
 772			break;
 773		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
 774		dma_async_tx_descriptor_init(&desc->desc, chan);
 775		desc->desc.tx_submit = imxdma_tx_submit;
 776		/* txd.flags will be overwritten in prep funcs */
 777		desc->desc.flags = DMA_CTRL_ACK;
 778		desc->status = DMA_COMPLETE;
 779
 780		list_add_tail(&desc->node, &imxdmac->ld_free);
 781		imxdmac->descs_allocated++;
 782	}
 783
 784	if (!imxdmac->descs_allocated)
 785		return -ENOMEM;
 786
 787	return imxdmac->descs_allocated;
 788}
 789
 790static void imxdma_free_chan_resources(struct dma_chan *chan)
 791{
 792	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 793	struct imxdma_engine *imxdma = imxdmac->imxdma;
 794	struct imxdma_desc *desc, *_desc;
 795	unsigned long flags;
 796
 797	spin_lock_irqsave(&imxdma->lock, flags);
 798
 799	imxdma_disable_hw(imxdmac);
 800	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
 801	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
 802
 803	spin_unlock_irqrestore(&imxdma->lock, flags);
 804
 805	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
 806		kfree(desc);
 807		imxdmac->descs_allocated--;
 808	}
 809	INIT_LIST_HEAD(&imxdmac->ld_free);
 810
 811	kfree(imxdmac->sg_list);
 812	imxdmac->sg_list = NULL;
 813}
 814
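     /*
      * Prepare a slave scatter/gather descriptor. A free descriptor must
      * be available, the channel must not be running a cyclic transfer,
      * and the first segment must match the configured bus-width
      * alignment.
      */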
 815static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 816		struct dma_chan *chan, struct scatterlist *sgl,
 817		unsigned int sg_len, enum dma_transfer_direction direction,
 818		unsigned long flags, void *context)
 819{
 820	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 821	struct scatterlist *sg;
 822	int i, dma_length = 0;
 823	struct imxdma_desc *desc;
 824
 825	if (list_empty(&imxdmac->ld_free) ||
 826	    imxdma_chan_is_doing_cyclic(imxdmac))
 827		return NULL;
 828
 829	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 830
 831	for_each_sg(sgl, sg, sg_len, i) {
 832		dma_length += sg_dma_len(sg);
 833	}
 834
 835	switch (imxdmac->word_size) {
 836	case DMA_SLAVE_BUSWIDTH_4_BYTES:
 837		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
 838			return NULL;
 839		break;
 840	case DMA_SLAVE_BUSWIDTH_2_BYTES:
 841		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
 842			return NULL;
 843		break;
 844	case DMA_SLAVE_BUSWIDTH_1_BYTE:
 845		break;
 846	default:
 847		return NULL;
 848	}
 849
 850	desc->type = IMXDMA_DESC_SLAVE_SG;
 851	desc->sg = sgl;
 852	desc->sgcount = sg_len;
 853	desc->len = dma_length;
 854	desc->direction = direction;
 855	if (direction == DMA_DEV_TO_MEM) {
 856		desc->src = imxdmac->per_address;
 857	} else {
 858		desc->dest = imxdmac->per_address;
 859	}
 860	desc->desc.callback = NULL;
 861	desc->desc.callback_param = NULL;
 862
 863	return &desc->desc;
 864}
 865
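     /*
      * Cyclic transfers are modelled as a scatterlist with one entry per
      * period; sg_chain() links the list back onto itself, so the channel
      * keeps looping over the buffer until it is terminated.
      */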
 866static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
 867		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 868		size_t period_len, enum dma_transfer_direction direction,
 869		unsigned long flags)
 870{
 871	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 872	struct imxdma_engine *imxdma = imxdmac->imxdma;
 873	struct imxdma_desc *desc;
 874	int i;
 875	unsigned int periods = buf_len / period_len;
 876
 877	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
 878			__func__, imxdmac->channel, buf_len, period_len);
 879
 880	if (list_empty(&imxdmac->ld_free) ||
 881	    imxdma_chan_is_doing_cyclic(imxdmac))
 882		return NULL;
 883
 884	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 885
 886	kfree(imxdmac->sg_list);
 887
 888	imxdmac->sg_list = kcalloc(periods + 1,
 889			sizeof(struct scatterlist), GFP_ATOMIC);
 890	if (!imxdmac->sg_list)
 891		return NULL;
 892
 893	sg_init_table(imxdmac->sg_list, periods);
 894
 895	for (i = 0; i < periods; i++) {
 896		sg_assign_page(&imxdmac->sg_list[i], NULL);
 897		imxdmac->sg_list[i].offset = 0;
 898		imxdmac->sg_list[i].dma_address = dma_addr;
 899		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
 900		dma_addr += period_len;
 901	}
 902
 903	/* close the loop */
 904	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);
 905
 906	desc->type = IMXDMA_DESC_CYCLIC;
 907	desc->sg = imxdmac->sg_list;
 908	desc->sgcount = periods;
 909	desc->len = IMX_DMA_LENGTH_LOOP;
 910	desc->direction = direction;
 911	if (direction == DMA_DEV_TO_MEM) {
 912		desc->src = imxdmac->per_address;
 913	} else {
 914		desc->dest = imxdmac->per_address;
 915	}
 916	desc->desc.callback = NULL;
 917	desc->desc.callback_param = NULL;
 918
 919	imxdma_config_write(chan, &imxdmac->config, direction);
 920
 921	return &desc->desc;
 922}
 923
 924static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
 925	struct dma_chan *chan, dma_addr_t dest,
 926	dma_addr_t src, size_t len, unsigned long flags)
 927{
 928	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 929	struct imxdma_engine *imxdma = imxdmac->imxdma;
 930	struct imxdma_desc *desc;
 931
 932	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
 933		__func__, imxdmac->channel, (unsigned long long)src,
 934		(unsigned long long)dest, len);
 935
 936	if (list_empty(&imxdmac->ld_free) ||
 937	    imxdma_chan_is_doing_cyclic(imxdmac))
 938		return NULL;
 939
 940	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 941
 942	desc->type = IMXDMA_DESC_MEMCPY;
 943	desc->src = src;
 944	desc->dest = dest;
 945	desc->len = len;
 946	desc->direction = DMA_MEM_TO_MEM;
 947	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
 948	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
 949	desc->desc.callback = NULL;
 950	desc->desc.callback_param = NULL;
 951
 952	return &desc->desc;
 953}
 954
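     /*
      * Interleaved (2D) transfers support a single frame_size of 1 and
      * MEM_TO_MEM only: x is the chunk size, y the number of frames and
      * w the line stride (icg + x). The hardware 2D slot is claimed later
      * in imxdma_xfer_desc().
      */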
 955static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
 956	struct dma_chan *chan, struct dma_interleaved_template *xt,
 957	unsigned long flags)
 958{
 959	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 960	struct imxdma_engine *imxdma = imxdmac->imxdma;
 961	struct imxdma_desc *desc;
 962
 963	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
 964		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
 965		imxdmac->channel, (unsigned long long)xt->src_start,
 966		(unsigned long long) xt->dst_start,
 967		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
 968		xt->numf, xt->frame_size);
 969
 970	if (list_empty(&imxdmac->ld_free) ||
 971	    imxdma_chan_is_doing_cyclic(imxdmac))
 972		return NULL;
 973
  974	if (xt->frame_size != 1 || !xt->numf || xt->dir != DMA_MEM_TO_MEM)
 975		return NULL;
 976
 977	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 978
 979	desc->type = IMXDMA_DESC_INTERLEAVED;
 980	desc->src = xt->src_start;
 981	desc->dest = xt->dst_start;
 982	desc->x = xt->sgl[0].size;
 983	desc->y = xt->numf;
 984	desc->w = xt->sgl[0].icg + desc->x;
 985	desc->len = desc->x * desc->y;
 986	desc->direction = DMA_MEM_TO_MEM;
 987	desc->config_port = IMX_DMA_MEMSIZE_32;
 988	desc->config_mem = IMX_DMA_MEMSIZE_32;
 989	if (xt->src_sgl)
 990		desc->config_mem |= IMX_DMA_TYPE_2D;
 991	if (xt->dst_sgl)
 992		desc->config_port |= IMX_DMA_TYPE_2D;
 993	desc->desc.callback = NULL;
 994	desc->desc.callback_param = NULL;
 995
 996	return &desc->desc;
 997}
 998
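     /*
      * Start the first queued descriptor, but only if the channel is idle;
      * otherwise the completion tasklet picks the queue up once the
      * running descriptor finishes.
      */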
 999static void imxdma_issue_pending(struct dma_chan *chan)
1000{
1001	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
1002	struct imxdma_engine *imxdma = imxdmac->imxdma;
1003	struct imxdma_desc *desc;
1004	unsigned long flags;
1005
1006	spin_lock_irqsave(&imxdma->lock, flags);
1007	if (list_empty(&imxdmac->ld_active) &&
1008	    !list_empty(&imxdmac->ld_queue)) {
1009		desc = list_first_entry(&imxdmac->ld_queue,
1010					struct imxdma_desc, node);
1011
1012		if (imxdma_xfer_desc(desc) < 0) {
1013			dev_warn(imxdma->dev,
1014				 "%s: channel: %d couldn't issue DMA xfer\n",
1015				 __func__, imxdmac->channel);
1016		} else {
1017			list_move_tail(imxdmac->ld_queue.next,
1018				       &imxdmac->ld_active);
1019		}
1020	}
1021	spin_unlock_irqrestore(&imxdma->lock, flags);
1022}
1023
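     /*
      * Accept only channels belonging to this controller; on a match,
      * record the DT request line and clear the legacy chan->private data.
      */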
1024static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
1025{
1026	struct imxdma_filter_data *fdata = param;
1027	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);
1028
1029	if (chan->device->dev != fdata->imxdma->dev)
1030		return false;
1031
1032	imxdma_chan->dma_request = fdata->request;
1033	chan->private = NULL;
1034
1035	return true;
1036}
1037
1038static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
1039						struct of_dma *ofdma)
1040{
1041	int count = dma_spec->args_count;
1042	struct imxdma_engine *imxdma = ofdma->of_dma_data;
1043	struct imxdma_filter_data fdata = {
1044		.imxdma = imxdma,
1045	};
1046
1047	if (count != 1)
1048		return NULL;
1049
1050	fdata.request = dma_spec->args[0];
1051
1052	return dma_request_channel(imxdma->dma_device.cap_mask,
1053					imxdma_filter_fn, &fdata);
1054}
1055
1056static int __init imxdma_probe(struct platform_device *pdev)
1057{
1058	struct imxdma_engine *imxdma;
1059	struct resource *res;
1060	const struct of_device_id *of_id;
1061	int ret, i;
1062	int irq, irq_err;
1063
1064	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
1065	if (of_id)
1066		pdev->id_entry = of_id->data;
1067
1068	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
1069	if (!imxdma)
1070		return -ENOMEM;
1071
1072	imxdma->dev = &pdev->dev;
1073	imxdma->devtype = pdev->id_entry->driver_data;
1074
1075	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1076	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
1077	if (IS_ERR(imxdma->base))
1078		return PTR_ERR(imxdma->base);
1079
1080	irq = platform_get_irq(pdev, 0);
1081	if (irq < 0)
1082		return irq;
1083
1084	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
1085	if (IS_ERR(imxdma->dma_ipg))
1086		return PTR_ERR(imxdma->dma_ipg);
1087
1088	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
1089	if (IS_ERR(imxdma->dma_ahb))
1090		return PTR_ERR(imxdma->dma_ahb);
1091
1092	ret = clk_prepare_enable(imxdma->dma_ipg);
1093	if (ret)
1094		return ret;
1095	ret = clk_prepare_enable(imxdma->dma_ahb);
1096	if (ret)
1097		goto disable_dma_ipg_clk;
1098
1099	/* reset DMA module */
1100	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
1101
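     	/*
     	 * i.MX1 exposes one shared DMA interrupt plus a separate error
     	 * interrupt; i.MX21/27 use one interrupt per channel, requested
     	 * in the channel loop below.
     	 */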
1102	if (is_imx1_dma(imxdma)) {
1103		ret = devm_request_irq(&pdev->dev, irq,
1104				       dma_irq_handler, 0, "DMA", imxdma);
1105		if (ret) {
1106			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
1107			goto disable_dma_ahb_clk;
1108		}
1109		imxdma->irq = irq;
1110
1111		irq_err = platform_get_irq(pdev, 1);
1112		if (irq_err < 0) {
1113			ret = irq_err;
1114			goto disable_dma_ahb_clk;
1115		}
1116
1117		ret = devm_request_irq(&pdev->dev, irq_err,
1118				       imxdma_err_handler, 0, "DMA", imxdma);
1119		if (ret) {
1120			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
1121			goto disable_dma_ahb_clk;
1122		}
1123		imxdma->irq_err = irq_err;
1124	}
1125
1126	/* enable DMA module */
1127	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);
1128
1129	/* clear all interrupts */
1130	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
1131
1132	/* disable interrupts */
1133	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
1134
1135	INIT_LIST_HEAD(&imxdma->dma_device.channels);
1136
1137	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
1138	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
1139	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
1140	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);
1141
1142	/* Initialize 2D global parameters */
1143	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
1144		imxdma->slots_2d[i].count = 0;
1145
1146	spin_lock_init(&imxdma->lock);
1147
1148	/* Initialize channel parameters */
1149	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
1150		struct imxdma_channel *imxdmac = &imxdma->channel[i];
1151
1152		if (!is_imx1_dma(imxdma)) {
1153			ret = devm_request_irq(&pdev->dev, irq + i,
1154					dma_irq_handler, 0, "DMA", imxdma);
1155			if (ret) {
 1156				dev_warn(imxdma->dev,
 1157					 "Can't register IRQ %d for DMA channel %d\n",
 1158					 irq + i, i);
1159				goto disable_dma_ahb_clk;
1160			}
1161
1162			imxdmac->irq = irq + i;
1163			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
1164		}
1165
1166		imxdmac->imxdma = imxdma;
1167
1168		INIT_LIST_HEAD(&imxdmac->ld_queue);
1169		INIT_LIST_HEAD(&imxdmac->ld_free);
1170		INIT_LIST_HEAD(&imxdmac->ld_active);
1171
 1172		tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
1174		imxdmac->chan.device = &imxdma->dma_device;
1175		dma_cookie_init(&imxdmac->chan);
1176		imxdmac->channel = i;
1177
1178		/* Add the channel to the DMAC list */
1179		list_add_tail(&imxdmac->chan.device_node,
1180			      &imxdma->dma_device.channels);
1181	}
1182
1183	imxdma->dma_device.dev = &pdev->dev;
1184
1185	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
1186	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
1187	imxdma->dma_device.device_tx_status = imxdma_tx_status;
1188	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
1189	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
1190	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
1191	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
1192	imxdma->dma_device.device_config = imxdma_config;
1193	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
1194	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
1195
1196	platform_set_drvdata(pdev, imxdma);
1197
1198	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
1199	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
1200	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
1201
1202	ret = dma_async_device_register(&imxdma->dma_device);
1203	if (ret) {
1204		dev_err(&pdev->dev, "unable to register\n");
1205		goto disable_dma_ahb_clk;
1206	}
1207
1208	if (pdev->dev.of_node) {
1209		ret = of_dma_controller_register(pdev->dev.of_node,
1210				imxdma_xlate, imxdma);
1211		if (ret) {
1212			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
1213			goto err_of_dma_controller;
1214		}
1215	}
1216
1217	return 0;
1218
1219err_of_dma_controller:
1220	dma_async_device_unregister(&imxdma->dma_device);
1221disable_dma_ahb_clk:
1222	clk_disable_unprepare(imxdma->dma_ahb);
1223disable_dma_ipg_clk:
1224	clk_disable_unprepare(imxdma->dma_ipg);
1225	return ret;
1226}
1227
1228static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
1229{
1230	int i;
1231
1232	if (is_imx1_dma(imxdma)) {
1233		disable_irq(imxdma->irq);
1234		disable_irq(imxdma->irq_err);
1235	}
1236
1237	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
1238		struct imxdma_channel *imxdmac = &imxdma->channel[i];
1239
1240		if (!is_imx1_dma(imxdma))
1241			disable_irq(imxdmac->irq);
1242
1243		tasklet_kill(&imxdmac->dma_tasklet);
1244	}
1245}
1246
1247static int imxdma_remove(struct platform_device *pdev)
1248{
1249	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
1250
1251	imxdma_free_irq(pdev, imxdma);
1252
 1253	dma_async_device_unregister(&imxdma->dma_device);
1254
1255	if (pdev->dev.of_node)
1256		of_dma_controller_free(pdev->dev.of_node);
1257
1258	clk_disable_unprepare(imxdma->dma_ipg);
1259	clk_disable_unprepare(imxdma->dma_ahb);
1260
 1261	return 0;
1262}
1263
1264static struct platform_driver imxdma_driver = {
1265	.driver		= {
1266		.name	= "imx-dma",
1267		.of_match_table = imx_dma_of_dev_id,
1268	},
1269	.id_table	= imx_dma_devtype,
1270	.remove		= imxdma_remove,
1271};
1272
1273static int __init imxdma_module_init(void)
1274{
1275	return platform_driver_probe(&imxdma_driver, imxdma_probe);
1276}
1277subsys_initcall(imxdma_module_init);
1278
1279MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
 1280MODULE_DESCRIPTION("i.MX DMA driver");
1281MODULE_LICENSE("GPL");