/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	bool legacy;
	unsigned dma_requests;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan *lch_map[OMAP_SDMA_CHANNELS];
};

struct omap_chan {
	struct virt_dma_chan vc;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config	cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;
	bool running;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};
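
/*
 * Note added for clarity (not in the original source): each sg entry
 * moves EN elements per frame and FN frames, so it covers
 * ES * EN * FN bytes, where ES is the element size implied by
 * CSDP_DATA_TYPE_xxx.  Illustrative numbers: with 32-bit elements
 * (ES = 4), en = 64 and fn = 8, one entry transfers 4 * 64 * 8 = 2048
 * bytes.
 */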

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};

enum {
	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2, /* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2, /* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9, /* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9, /* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9, /* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9, /* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9, /* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9, /* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),
};

static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

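/*
 * Register access helpers.  A note added for clarity (not in the
 * original source): OMAP1 sDMA exposes some registers as a single
 * 16-bit word and some as a pair of 16-bit words (low half first),
 * while OMAP2+ uses flat 32-bit registers, so every access is
 * dispatched on the omap_dma_reg type from the platform register map.
 */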
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}

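/*
 * Clarifying comment (not in the original source): the channel status
 * register is cleared differently per SoC generation.  On OMAP1 the
 * CSR is read-to-clear, so a plain read discards the pending status;
 * on OMAP2+ it is write-one-to-clear, so writing ~0 (or writing back
 * the value just read, as omap_dma_get_csr() does) acknowledges the
 * latched events.
 */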
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (!dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
	unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, d->cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

	c->running = true;
}

static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = omap_dma_chan_read(c, CCR);
			if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}

	c->running = false;
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, 0);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	if (od->legacy) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
				       &c->dma_ch);
	}

	dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
		c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!od->legacy) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!od->legacy) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
	c->dma_sig = 0;
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}
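
/*
 * Worked example for the residue logic above (illustrative values,
 * not from the original source): for a descriptor with 16-bit
 * elements (es_size = 2) and two entries of en = 8, fn = 4 (64 bytes
 * each), a hardware position pointing 16 bytes into the first entry
 * yields (64 - 16) + 64 = 112 bytes still to transfer; entries before
 * the one containing the position contribute nothing.
 */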

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far).  Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!c->paused && c->running) {
		uint32_t ccr = omap_dma_chan_read(c, CCR);
		/*
		 * The channel is no longer active, set the return value
		 * accordingly.
		 */
		if (!(ccr & CCR_ENABLE))
			ret = DMA_COMPLETE;
	}

	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, es, en, frame_bytes;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[i].addr = sg_dma_address(sgent);
		d->sg[i].en = en;
		d->sg[i].fn = sg_dma_len(sgent) / frame_bytes;
	}

	d->sglen = sglen;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
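
/*
 * Illustrative client-side sketch (not part of the original driver):
 * how a peripheral driver might use the slave-sg path above through
 * the generic dmaengine API.  The function name, FIFO address and
 * burst value are made-up placeholders; the scatterlist is assumed to
 * be DMA-mapped already (dma_map_sg()).
 */
#if 0	/* example only, not compiled */
static int example_tx(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sglen, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,		/* device FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 16,		/* becomes EN, elements per frame */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);	/* -> omap_dma_slave_config() */
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_sg(chan, sgl, sglen, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* -> omap_dma_issue_pending() */
	return 0;
}
#endif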

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM) {
			d->ccr |= CCR_TRIGGER_SRC;
			d->csdp |= CSDP_DST_PACKED;
		} else {
			d->csdp |= CSDP_SRC_PACKED;
		}

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
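
/*
 * Illustrative sketch (not part of the original driver): a cyclic
 * transfer as an audio driver, the main user of this path, would set
 * one up.  The function name, buffer layout and callback are
 * placeholder assumptions.
 */
#if 0	/* example only, not compiled */
static int example_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			     size_t buf_len, size_t period_len,
			     dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *desc;

	/* One interrupt per period; the engine loops over the buffer. */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = period_done;	/* invoked via vchan_cyclic_callback() */
	desc->callback_param = arg;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
#endif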

static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	uint8_t data_type;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((src | dest | len));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = src;
	d->fi = 0;
	d->es = data_type;
	d->sg[0].en = len / BIT(data_type);
	d->sg[0].fn = 1;
	d->sg[0].addr = dest;
	d->sglen = 1;
	d->ccr = c->ccr;
	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
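
/*
 * Note on the element-size heuristic above (added for clarity, not in
 * the original source): __ffs(src | dest | len) finds the lowest set
 * bit of the combined addresses and length, i.e. their common
 * alignment.  Illustrative values: src = 0x1000, dest = 0x2004,
 * len = 0x100 gives __ffs(0x3104) = 2, so 32-bit elements
 * (CSDP_DATA_TYPE_32) are used; any odd address or length falls back
 * to 8-bit elements.
 */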

static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		omap_dma_desc_free(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the DMA twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void omap_dma_synchronize(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_synchronize(&c->vc);
}

static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_chan_init(struct omap_dmadev *od)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.device_synchronize = omap_dma_synchronize;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	od->dma_requests = OMAP_SDMA_REQUESTS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
	}

	for (i = 0; i < OMAP_SDMA_CHANNELS; i++) {
		rc = omap_dma_chan_init(od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc)
			return rc;
	}

	od->ddev.filter.map = od->plat->slave_map;
	od->ddev.filter.mapcnt = od->plat->slavecnt;
	od->ddev.filter.fn = omap_dma_filter_fn;

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);

	if (!od->legacy) {
		/* Disable all interrupts */
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
	}

	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		if (req <= od->dma_requests) {
			c->dma_sig = req;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
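
/*
 * Illustrative sketch (not part of the original driver): requesting a
 * channel by sDMA request number with the filter above, as non-DT
 * platform code would.  The function name and request number 42 are
 * made-up examples.
 */
#if 0	/* example only, not compiled */
static struct dma_chan *example_request(void)
{
	dma_cap_mask_t mask;
	unsigned req = 42;	/* the peripheral's sDMA request line */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, omap_dma_filter_fn, &req);
}
#endif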

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");