/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD		0x020c

#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */
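
/*
 * Worked example of the DRCMR(n) mapping above (illustrative only): request
 * line 5 maps to 0x0100 + (5 << 2) = 0x0114, while request line 70 (>= 64)
 * maps to 0x1100 + ((70 & 0x3f) << 2) = 0x1118.
 */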

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
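
/*
 * Illustrative sketch (not taken from a hardware manual): a mem-to-dev
 * transfer of len bytes to a 32-bit wide device port with 32-byte bursts
 * would compose its DCMD value roughly as
 *
 *	dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_WIDTH4 | DCMD_BURST32 |
 *	       (DCMD_LENGTH & len);
 *
 * which mirrors what mmp_pdma_config() and the prep functions below build.
 */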

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int				dma_channels;
	void __iomem			*base;
	struct device			*dev;
	struct dma_device		device;
	struct mmp_pdma_phy		*phy;
	spinlock_t phy_lock; /* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx)					\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)					\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)					\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)					\
	container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (!(dint & BIT(phy->idx)))
		return -EAGAIN;

	/* clear irq */
	dcsr = readl(phy->base + reg);
	writel(dcsr, phy->base + reg);
	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

	return 0;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		/* only handle interrupts belonging to the pdma driver */
		if (i >= pdev->dma_channels)
			break;
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* look up a free phy channel in descending priority order */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */
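
	/*
	 * For example (worked out from the table above): channel 21 falls in
	 * the 20 - 23 group, so its priority is (21 & 0xf) >> 2 = 1.
	 */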

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running; the irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each descriptor carries its own tx_submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */

static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static int mmp_pdma_config(struct dma_chan *dchan,
			   struct dma_slave_config *cfg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	if (cfg->direction == DMA_DEV_TO_MEM) {
		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
		addr = cfg->src_addr;
	} else if (cfg->direction == DMA_MEM_TO_DEV) {
		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
		addr = cfg->dst_addr;
	}

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		chan->dcmd |= DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		chan->dcmd |= DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		chan->dcmd |= DCMD_WIDTH4;

	if (maxburst == 8)
		chan->dcmd |= DCMD_BURST8;
	else if (maxburst == 16)
		chan->dcmd |= DCMD_BURST16;
	else if (maxburst == 32)
		chan->dcmd |= DCMD_BURST32;

	chan->dir = cfg->direction;
	chan->dev_addr = addr;
	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (cfg->slave_id)
		chan->drcmr = cfg->slave_id;

	return 0;
}
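
/*
 * Illustrative client-side sketch (not part of this driver): a peripheral
 * driver using this controller would typically configure the channel before
 * preparing transfers. The FIFO address below is a hypothetical example.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 32,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */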

static int mmp_pdma_terminate_all(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	if (!dchan)
		return -EINVAL;

	disable_chan(chan->phy);
	mmp_pdma_free_phy(chan);
	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	chan->idle = true;

	return 0;
}

static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
				     dma_cookie_t cookie)
{
	struct mmp_pdma_desc_sw *sw;
	u32 curr, residue = 0;
	bool passed = false;
	bool cyclic = chan->cyclic_first != NULL;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	if (chan->dir == DMA_DEV_TO_MEM)
		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
	else
		curr = readl(chan->phy->base + DSADR(chan->phy->idx));

	list_for_each_entry(sw, &chan->chain_running, node) {
		u32 start, end, len;

		if (chan->dir == DMA_DEV_TO_MEM)
			start = sw->desc.dtadr;
		else
			start = sw->desc.dsadr;

		len = sw->desc.dcmd & DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we find the descriptor whose
		 * boundaries contain the curr pointer. All descriptors that
		 * occur in the list _after_ that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */
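		/*
		 * Worked example (illustrative numbers): with three 4096-byte
		 * descriptors in the chain and curr pointing 1024 bytes into
		 * the second one, residue = (4096 - 1024) + 4096 = 7168.
		 */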

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}

		/*
		 * Descriptors that have the ENDIRQEN bit set mark the end of a
		 * transaction chain, and the cookie assigned with it has been
		 * returned previously from mmp_pdma_tx_submit().
		 *
		 * In case we have multiple transactions in the running chain,
		 * and the cookie does not match the one the user asked us
		 * about, reset the state variables and start over.
		 *
		 * This logic does not apply to cyclic transactions, where all
		 * descriptors have the ENDIRQEN bit set, and for which we
		 * can't have multiple transactions on one channel anyway.
		 */
		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
			continue;

		if (sw->async_tx.cookie == cookie) {
			return residue;
		} else {
			residue = 0;
			passed = false;
		}
	}

	/* We should only get here in case of cyclic transactions */
	return residue;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(ret != DMA_ERROR))
		dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));

	return ret;
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run the descriptor callbacks
 * Start the pending list
 */
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;
	struct dmaengine_desc_callback cb;

	if (chan->cyclic_first) {
		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		dmaengine_desc_callback_invoke(&cb, NULL);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;
			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(txd, &cb);
		dmaengine_desc_callback_invoke(&cb, NULL);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
	struct mmp_pdma_phy *phy;
	int i, irq = 0, irq_num = 0;

	for (i = 0; i < pdev->dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	if (irq_num != pdev->dma_channels) {
		irq = platform_get_irq(op, 0);
		devm_free_irq(&op->dev, irq, pdev);
	} else {
		for (i = 0; i < pdev->dma_channels; i++) {
			phy = &pdev->phy[i];
			irq = platform_get_irq(op, i);
			devm_free_irq(&op->dev, irq, phy);
		}
	}

	dma_async_device_unregister(&pdev->device);
	return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy  = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       IRQF_SHARED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static const struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->device);
	if (!chan)
		return NULL;

	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];

	return chan;
}
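
/*
 * Hedged device-tree sketch (client side; node name, address, and request
 * lines are hypothetical): the first cell of the dma specifier is taken as
 * the DRCMR request line by mmp_pdma_dma_xlate() above, e.g.
 *
 *	uart1: serial@d4017000 {
 *		...
 *		dmas = <&pdma0 4>, <&pdma0 5>;
 *		dma-names = "rx", "tx";
 *	};
 */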

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default to 32 channels */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all channels share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_SHARED, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_config = mmp_pdma_config;
	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
	pdev->device.src_addr_widths = widths;
	pdev->device.dst_addr_widths = widths;
	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
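
/*
 * Hedged usage sketch (not part of this driver): a legacy, non-DT client can
 * grab a channel and program its DRCMR request line through this filter; the
 * request line number below is purely illustrative.
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr = 25;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
 */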

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");