SA11x0 DMAengine driver (sa11x0-dma), Linux v6.8:
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * SA11x0 DMAengine support
   4 *
   5 * Copyright (C) 2012 Russell King
   6 *   Derived in part from arch/arm/mach-sa1100/dma.c,
   7 *   Copyright (C) 2000, 2001 by Nicolas Pitre
   8 */
   9#include <linux/sched.h>
  10#include <linux/device.h>
  11#include <linux/dmaengine.h>
  12#include <linux/init.h>
  13#include <linux/interrupt.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/platform_device.h>
  17#include <linux/slab.h>
  18#include <linux/spinlock.h>
  19
  20#include "virt-dma.h"
  21
  22#define NR_PHY_CHAN	6
  23#define DMA_ALIGN	3
  24#define DMA_MAX_SIZE	0x1fff
  25#define DMA_CHUNK_SIZE	0x1000
  26
  27#define DMA_DDAR	0x00
  28#define DMA_DCSR_S	0x04
  29#define DMA_DCSR_C	0x08
  30#define DMA_DCSR_R	0x0c
  31#define DMA_DBSA	0x10
  32#define DMA_DBTA	0x14
  33#define DMA_DBSB	0x18
  34#define DMA_DBTB	0x1c
  35#define DMA_SIZE	0x20
  36
  37#define DCSR_RUN	(1 << 0)
  38#define DCSR_IE		(1 << 1)
  39#define DCSR_ERROR	(1 << 2)
  40#define DCSR_DONEA	(1 << 3)
  41#define DCSR_STRTA	(1 << 4)
  42#define DCSR_DONEB	(1 << 5)
  43#define DCSR_STRTB	(1 << 6)
  44#define DCSR_BIU	(1 << 7)
  45
  46#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
  47#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
  48#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
  49#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
  50#define DDAR_Ser0UDCTr	(0x0 << 4)
  51#define DDAR_Ser0UDCRc	(0x1 << 4)
  52#define DDAR_Ser1SDLCTr	(0x2 << 4)
  53#define DDAR_Ser1SDLCRc	(0x3 << 4)
  54#define DDAR_Ser1UARTTr	(0x4 << 4)
  55#define DDAR_Ser1UARTRc	(0x5 << 4)
  56#define DDAR_Ser2ICPTr	(0x6 << 4)
  57#define DDAR_Ser2ICPRc	(0x7 << 4)
  58#define DDAR_Ser3UARTTr	(0x8 << 4)
  59#define DDAR_Ser3UARTRc	(0x9 << 4)
  60#define DDAR_Ser4MCP0Tr	(0xa << 4)
  61#define DDAR_Ser4MCP0Rc	(0xb << 4)
  62#define DDAR_Ser4MCP1Tr	(0xc << 4)
  63#define DDAR_Ser4MCP1Rc	(0xd << 4)
  64#define DDAR_Ser4SSPTr	(0xe << 4)
  65#define DDAR_Ser4SSPRc	(0xf << 4)
  66
  67struct sa11x0_dma_sg {
  68	u32			addr;
  69	u32			len;
  70};
  71
  72struct sa11x0_dma_desc {
  73	struct virt_dma_desc	vd;
  74
  75	u32			ddar;
  76	size_t			size;
  77	unsigned		period;
  78	bool			cyclic;
  79
  80	unsigned		sglen;
  81	struct sa11x0_dma_sg	sg[] __counted_by(sglen);
  82};
  83
  84struct sa11x0_dma_phy;
  85
  86struct sa11x0_dma_chan {
  87	struct virt_dma_chan	vc;
  88
  89	/* protected by c->vc.lock */
  90	struct sa11x0_dma_phy	*phy;
  91	enum dma_status		status;
  92
  93	/* protected by d->lock */
  94	struct list_head	node;
  95
  96	u32			ddar;
  97	const char		*name;
  98};
  99
 100struct sa11x0_dma_phy {
 101	void __iomem		*base;
 102	struct sa11x0_dma_dev	*dev;
 103	unsigned		num;
 104
 105	struct sa11x0_dma_chan	*vchan;
 106
 107	/* Protected by c->vc.lock */
 108	unsigned		sg_load;
 109	struct sa11x0_dma_desc	*txd_load;
 110	unsigned		sg_done;
 111	struct sa11x0_dma_desc	*txd_done;
 112	u32			dbs[2];
 113	u32			dbt[2];
 114	u32			dcsr;
 115};
 116
 117struct sa11x0_dma_dev {
 118	struct dma_device	slave;
 119	void __iomem		*base;
 120	spinlock_t		lock;
 121	struct tasklet_struct	task;
 122	struct list_head	chan_pending;
 123	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
 124};
 125
 126static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
 127{
 128	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
 129}
 130
 131static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
 132{
 133	return container_of(dmadev, struct sa11x0_dma_dev, slave);
 134}
 135
 136static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
 137{
 138	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
 139
 140	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
 141}
 142
 143static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
 144{
 145	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
 146}
 147
 148static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
 149{
 150	list_del(&txd->vd.node);
 151	p->txd_load = txd;
 152	p->sg_load = 0;
 153
 154	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
 155		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
 156}
 157
 158static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
 159	struct sa11x0_dma_chan *c)
 160{
 161	struct sa11x0_dma_desc *txd = p->txd_load;
 162	struct sa11x0_dma_sg *sg;
 163	void __iomem *base = p->base;
 164	unsigned dbsx, dbtx;
 165	u32 dcsr;
 166
 167	if (!txd)
 168		return;
 169
 170	dcsr = readl_relaxed(base + DMA_DCSR_R);
 171
 172	/* Don't try to load the next transfer if both buffers are started */
 173	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
 174		return;
 175
 176	if (p->sg_load == txd->sglen) {
 177		if (!txd->cyclic) {
 178			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
 179
 180			/*
 181			 * We have reached the end of the current descriptor.
 182			 * Peek at the next descriptor, and if compatible with
 183			 * the current, start processing it.
 184			 */
 185			if (txn && txn->ddar == txd->ddar) {
 186				txd = txn;
 187				sa11x0_dma_start_desc(p, txn);
 188			} else {
 189				p->txd_load = NULL;
 190				return;
 191			}
 192		} else {
 193			/* Cyclic: reset back to beginning */
 194			p->sg_load = 0;
 195		}
 196	}
 197
 198	sg = &txd->sg[p->sg_load++];
 199
 200	/* Select buffer to load according to channel status */
 201	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
 202	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
 203		dbsx = DMA_DBSA;
 204		dbtx = DMA_DBTA;
 205		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
 206	} else {
 207		dbsx = DMA_DBSB;
 208		dbtx = DMA_DBTB;
 209		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
 210	}
 211
 212	writel_relaxed(sg->addr, base + dbsx);
 213	writel_relaxed(sg->len, base + dbtx);
 214	writel(dcsr, base + DMA_DCSR_S);
 215
 216	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
 217		p->num, dcsr,
 218		'A' + (dbsx == DMA_DBSB), sg->addr,
 219		'A' + (dbtx == DMA_DBTB), sg->len);
 220}
 221
 222static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
 223	struct sa11x0_dma_chan *c)
 224{
 225	struct sa11x0_dma_desc *txd = p->txd_done;
 226
 227	if (++p->sg_done == txd->sglen) {
 228		if (!txd->cyclic) {
 229			vchan_cookie_complete(&txd->vd);
 230
 231			p->sg_done = 0;
 232			p->txd_done = p->txd_load;
 233
 234			if (!p->txd_done)
 235				tasklet_schedule(&p->dev->task);
 236		} else {
 237			if ((p->sg_done % txd->period) == 0)
 238				vchan_cyclic_callback(&txd->vd);
 239
 240			/* Cyclic: reset back to beginning */
 241			p->sg_done = 0;
 242		}
 243	}
 244
 245	sa11x0_dma_start_sg(p, c);
 246}
 247
 248static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
 249{
 250	struct sa11x0_dma_phy *p = dev_id;
 251	struct sa11x0_dma_dev *d = p->dev;
 252	struct sa11x0_dma_chan *c;
 253	u32 dcsr;
 254
 255	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
 256	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
 257		return IRQ_NONE;
 258
 259	/* Clear reported status bits */
 260	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
 261		p->base + DMA_DCSR_C);
 262
 263	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
 264
 265	if (dcsr & DCSR_ERROR) {
 266		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
 267			p->num, dcsr,
 268			readl_relaxed(p->base + DMA_DDAR),
 269			readl_relaxed(p->base + DMA_DBSA),
 270			readl_relaxed(p->base + DMA_DBTA),
 271			readl_relaxed(p->base + DMA_DBSB),
 272			readl_relaxed(p->base + DMA_DBTB));
 273	}
 274
 275	c = p->vchan;
 276	if (c) {
 277		unsigned long flags;
 278
 279		spin_lock_irqsave(&c->vc.lock, flags);
 280		/*
 281		 * Now that we're holding the lock, check that the vchan
 282		 * really is associated with this pchan before touching the
 283		 * hardware.  This should always succeed, because we won't
 284		 * change p->vchan or c->phy while the channel is actively
 285		 * transferring.
 286		 */
 287		if (c->phy == p) {
 288			if (dcsr & DCSR_DONEA)
 289				sa11x0_dma_complete(p, c);
 290			if (dcsr & DCSR_DONEB)
 291				sa11x0_dma_complete(p, c);
 292		}
 293		spin_unlock_irqrestore(&c->vc.lock, flags);
 294	}
 295
 296	return IRQ_HANDLED;
 297}
 298
 299static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
 300{
 301	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
 302
 303	/* If the issued list is empty, we have no further txds to process */
 304	if (txd) {
 305		struct sa11x0_dma_phy *p = c->phy;
 306
 307		sa11x0_dma_start_desc(p, txd);
 308		p->txd_done = txd;
 309		p->sg_done = 0;
 310
 311		/* The channel should not have any transfers started */
 312		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
 313				      (DCSR_STRTA | DCSR_STRTB));
 314
 315		/* Clear the run and start bits before changing DDAR */
 316		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
 317			       p->base + DMA_DCSR_C);
 318		writel_relaxed(txd->ddar, p->base + DMA_DDAR);
 319
 320		/* Try to start both buffers */
 321		sa11x0_dma_start_sg(p, c);
 322		sa11x0_dma_start_sg(p, c);
 323	}
 324}
 325
 326static void sa11x0_dma_tasklet(struct tasklet_struct *t)
 327{
 328	struct sa11x0_dma_dev *d = from_tasklet(d, t, task);
 329	struct sa11x0_dma_phy *p;
 330	struct sa11x0_dma_chan *c;
 331	unsigned pch, pch_alloc = 0;
 332
 333	dev_dbg(d->slave.dev, "tasklet enter\n");
 334
 335	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
 336		spin_lock_irq(&c->vc.lock);
 337		p = c->phy;
 338		if (p && !p->txd_done) {
 339			sa11x0_dma_start_txd(c);
 340			if (!p->txd_done) {
 341				/* No current txd associated with this channel */
 342				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
 343
 344				/* Mark this channel free */
 345				c->phy = NULL;
 346				p->vchan = NULL;
 347			}
 348		}
 349		spin_unlock_irq(&c->vc.lock);
 350	}
 351
 352	spin_lock_irq(&d->lock);
 353	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
 354		p = &d->phy[pch];
 355
 356		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
 357			c = list_first_entry(&d->chan_pending,
 358				struct sa11x0_dma_chan, node);
 359			list_del_init(&c->node);
 360
 361			pch_alloc |= 1 << pch;
 362
 363			/* Mark this channel allocated */
 364			p->vchan = c;
 365
 366			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
 367		}
 368	}
 369	spin_unlock_irq(&d->lock);
 370
 371	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
 372		if (pch_alloc & (1 << pch)) {
 373			p = &d->phy[pch];
 374			c = p->vchan;
 375
 376			spin_lock_irq(&c->vc.lock);
 377			c->phy = p;
 378
 379			sa11x0_dma_start_txd(c);
 380			spin_unlock_irq(&c->vc.lock);
 381		}
 382	}
 383
 384	dev_dbg(d->slave.dev, "tasklet exit\n");
 385}
 386
 387
 388static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
 389{
 390	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 391	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 392	unsigned long flags;
 393
 394	spin_lock_irqsave(&d->lock, flags);
 395	list_del_init(&c->node);
 396	spin_unlock_irqrestore(&d->lock, flags);
 397
 398	vchan_free_chan_resources(&c->vc);
 399}
 400
 401static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
 402{
 403	unsigned reg;
 404	u32 dcsr;
 405
 406	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
 407
 408	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
 409	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
 410		reg = DMA_DBSA;
 411	else
 412		reg = DMA_DBSB;
 413
 414	return readl_relaxed(p->base + reg);
 415}
 416
 417static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 418	dma_cookie_t cookie, struct dma_tx_state *state)
 419{
 420	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 421	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 422	struct sa11x0_dma_phy *p;
 423	struct virt_dma_desc *vd;
 424	unsigned long flags;
 425	enum dma_status ret;
 426
 427	ret = dma_cookie_status(&c->vc.chan, cookie, state);
 428	if (ret == DMA_COMPLETE)
 429		return ret;
 430
 431	if (!state)
 432		return c->status;
 433
 434	spin_lock_irqsave(&c->vc.lock, flags);
 435	p = c->phy;
 436
 437	/*
 438	 * If the cookie is on our issue queue, then the residue is
 439	 * its total size.
 440	 */
 441	vd = vchan_find_desc(&c->vc, cookie);
 442	if (vd) {
 443		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
 444	} else if (!p) {
 445		state->residue = 0;
 446	} else {
 447		struct sa11x0_dma_desc *txd;
 448		size_t bytes = 0;
 449
 450		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
 451			txd = p->txd_done;
 452		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
 453			txd = p->txd_load;
 454		else
 455			txd = NULL;
 456
 457		ret = c->status;
 458		if (txd) {
 459			dma_addr_t addr = sa11x0_dma_pos(p);
 460			unsigned i;
 461
 462			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);
 463
 464			for (i = 0; i < txd->sglen; i++) {
 465				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
 466					i, txd->sg[i].addr, txd->sg[i].len);
 467				if (addr >= txd->sg[i].addr &&
 468				    addr < txd->sg[i].addr + txd->sg[i].len) {
 469					unsigned len;
 470
 471					len = txd->sg[i].len -
 472						(addr - txd->sg[i].addr);
 473					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
 474						i, len);
 475					bytes += len;
 476					i++;
 477					break;
 478				}
 479			}
 480			for (; i < txd->sglen; i++) {
 481				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
 482					i, txd->sg[i].addr, txd->sg[i].len);
 483				bytes += txd->sg[i].len;
 484			}
 485		}
 486		state->residue = bytes;
 487	}
 488	spin_unlock_irqrestore(&c->vc.lock, flags);
 489
 490	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);
 491
 492	return ret;
 493}
 494
 495/*
 496 * Move pending txds to the issued list, and re-init pending list.
 497 * If not already pending, add this channel to the list of pending
 498 * channels and trigger the tasklet to run.
 499 */
 500static void sa11x0_dma_issue_pending(struct dma_chan *chan)
 501{
 502	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 503	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 504	unsigned long flags;
 505
 506	spin_lock_irqsave(&c->vc.lock, flags);
 507	if (vchan_issue_pending(&c->vc)) {
 508		if (!c->phy) {
 509			spin_lock(&d->lock);
 510			if (list_empty(&c->node)) {
 511				list_add_tail(&c->node, &d->chan_pending);
 512				tasklet_schedule(&d->task);
 513				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
 514			}
 515			spin_unlock(&d->lock);
 516		}
 517	} else
 518		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
 519	spin_unlock_irqrestore(&c->vc.lock, flags);
 520}
 521
 522static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 523	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
 524	enum dma_transfer_direction dir, unsigned long flags, void *context)
 525{
 526	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 527	struct sa11x0_dma_desc *txd;
 528	struct scatterlist *sgent;
 529	unsigned i, j = sglen;
 530	size_t size = 0;
 531
 532	/* SA11x0 channels can only operate in their native direction */
 533	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
 534		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
 535			&c->vc, c->ddar, dir);
 536		return NULL;
 537	}
 538
 539	/* Do not allow zero-sized txds */
 540	if (sglen == 0)
 541		return NULL;
 542
 543	for_each_sg(sg, sgent, sglen, i) {
 544		dma_addr_t addr = sg_dma_address(sgent);
 545		unsigned int len = sg_dma_len(sgent);
 546
 547		if (len > DMA_MAX_SIZE)
 548			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
 549		if (addr & DMA_ALIGN) {
 550			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
 551				&c->vc, &addr);
 552			return NULL;
 553		}
 554	}
 555
 556	txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
 557	if (!txd) {
 558		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
 559		return NULL;
 560	}
 561	txd->sglen = j;
 562
 563	j = 0;
 564	for_each_sg(sg, sgent, sglen, i) {
 565		dma_addr_t addr = sg_dma_address(sgent);
 566		unsigned len = sg_dma_len(sgent);
 567
 568		size += len;
 569
 570		do {
 571			unsigned tlen = len;
 572
 573			/*
 574			 * Check whether the transfer will fit.  If not, try
 575			 * to split the transfer up such that we end up with
 576			 * equal chunks - but make sure that we preserve the
 577			 * alignment.  This avoids small segments.
 578			 */
 579			if (tlen > DMA_MAX_SIZE) {
 580				unsigned mult = DIV_ROUND_UP(tlen,
 581					DMA_MAX_SIZE & ~DMA_ALIGN);
 582
 583				tlen = (tlen / mult) & ~DMA_ALIGN;
 584			}
 585
 586			txd->sg[j].addr = addr;
 587			txd->sg[j].len = tlen;
 588
 589			addr += tlen;
 590			len -= tlen;
 591			j++;
 592		} while (len);
 593	}
 594
 595	txd->ddar = c->ddar;
 596	txd->size = size;
 597
 598	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
 599		&c->vc, &txd->vd, txd->size, txd->sglen);
 600
 601	return vchan_tx_prep(&c->vc, &txd->vd, flags);
 602}
 603
 604static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
 605	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
 606	enum dma_transfer_direction dir, unsigned long flags)
 607{
 608	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 609	struct sa11x0_dma_desc *txd;
 610	unsigned i, j, k, sglen, sgperiod;
 611
 612	/* SA11x0 channels can only operate in their native direction */
 613	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
 614		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
 615			&c->vc, c->ddar, dir);
 616		return NULL;
 617	}
 618
 619	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
 620	sglen = size * sgperiod / period;
 621
 622	/* Do not allow zero-sized txds */
 623	if (sglen == 0)
 624		return NULL;
 625
 626	txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
 627	if (!txd) {
 628		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
 629		return NULL;
 630	}
 631	txd->sglen = sglen;
 632
 633	for (i = k = 0; i < size / period; i++) {
 634		size_t tlen, len = period;
 635
 636		for (j = 0; j < sgperiod; j++, k++) {
 637			tlen = len;
 638
 639			if (tlen > DMA_MAX_SIZE) {
 640				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
 641				tlen = (tlen / mult) & ~DMA_ALIGN;
 642			}
 643
 644			txd->sg[k].addr = addr;
 645			txd->sg[k].len = tlen;
 646			addr += tlen;
 647			len -= tlen;
 648		}
 649
 650		WARN_ON(len != 0);
 651	}
 652
 653	WARN_ON(k != sglen);
 654
 655	txd->ddar = c->ddar;
 656	txd->size = size;
 657	txd->cyclic = 1;
 658	txd->period = sgperiod;
 659
 660	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 661}
 662
 663static int sa11x0_dma_device_config(struct dma_chan *chan,
 664				    struct dma_slave_config *cfg)
 665{
 666	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 667	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
 668	dma_addr_t addr;
 669	enum dma_slave_buswidth width;
 670	u32 maxburst;
 671
 672	if (ddar & DDAR_RW) {
 673		addr = cfg->src_addr;
 674		width = cfg->src_addr_width;
 675		maxburst = cfg->src_maxburst;
 676	} else {
 677		addr = cfg->dst_addr;
 678		width = cfg->dst_addr_width;
 679		maxburst = cfg->dst_maxburst;
 680	}
 681
 682	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
 683	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
 684	    (maxburst != 4 && maxburst != 8))
 685		return -EINVAL;
 686
 687	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
 688		ddar |= DDAR_DW;
 689	if (maxburst == 8)
 690		ddar |= DDAR_BS;
 691
 692	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
 693		&c->vc, &addr, width, maxburst);
 694
 695	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
 696
 697	return 0;
 698}
 699
 700static int sa11x0_dma_device_pause(struct dma_chan *chan)
 701{
 702	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 703	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 704	struct sa11x0_dma_phy *p;
 705	unsigned long flags;
 706
 707	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
 708	spin_lock_irqsave(&c->vc.lock, flags);
 709	if (c->status == DMA_IN_PROGRESS) {
 710		c->status = DMA_PAUSED;
 711
 712		p = c->phy;
 713		if (p) {
 714			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
 715		} else {
 716			spin_lock(&d->lock);
 717			list_del_init(&c->node);
 718			spin_unlock(&d->lock);
 719		}
 720	}
 721	spin_unlock_irqrestore(&c->vc.lock, flags);
 722
 723	return 0;
 724}
 725
 726static int sa11x0_dma_device_resume(struct dma_chan *chan)
 727{
 728	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 729	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 730	struct sa11x0_dma_phy *p;
 731	unsigned long flags;
 732
 733	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
 734	spin_lock_irqsave(&c->vc.lock, flags);
 735	if (c->status == DMA_PAUSED) {
 736		c->status = DMA_IN_PROGRESS;
 737
 738		p = c->phy;
 739		if (p) {
 740			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
 741		} else if (!list_empty(&c->vc.desc_issued)) {
 742			spin_lock(&d->lock);
 743			list_add_tail(&c->node, &d->chan_pending);
 744			spin_unlock(&d->lock);
 745		}
 746	}
 747	spin_unlock_irqrestore(&c->vc.lock, flags);
 748
 749	return 0;
 750}
 751
 752static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
 753{
 754	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 755	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 756	struct sa11x0_dma_phy *p;
 757	LIST_HEAD(head);
 758	unsigned long flags;
 759
 760	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
 761	/* Clear the tx descriptor lists */
 762	spin_lock_irqsave(&c->vc.lock, flags);
 763	vchan_get_all_descriptors(&c->vc, &head);
 764
 765	p = c->phy;
 766	if (p) {
 767		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
 768		/* vchan is assigned to a pchan - stop the channel */
 769		writel(DCSR_RUN | DCSR_IE |
 770		       DCSR_STRTA | DCSR_DONEA |
 771		       DCSR_STRTB | DCSR_DONEB,
 772		       p->base + DMA_DCSR_C);
 773
 774		if (p->txd_load) {
 775			if (p->txd_load != p->txd_done)
 776				list_add_tail(&p->txd_load->vd.node, &head);
 777			p->txd_load = NULL;
 778		}
 779		if (p->txd_done) {
 780			list_add_tail(&p->txd_done->vd.node, &head);
 781			p->txd_done = NULL;
 782		}
 783		c->phy = NULL;
 784		spin_lock(&d->lock);
 785		p->vchan = NULL;
 786		spin_unlock(&d->lock);
 787		tasklet_schedule(&d->task);
 788	}
 789	spin_unlock_irqrestore(&c->vc.lock, flags);
 790	vchan_dma_desc_free_list(&c->vc, &head);
 791
 792	return 0;
 793}
 794
 795struct sa11x0_dma_channel_desc {
 796	u32 ddar;
 797	const char *name;
 798};
 799
 800#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
 801static const struct sa11x0_dma_channel_desc chan_desc[] = {
 802	CD(Ser0UDCTr, 0),
 803	CD(Ser0UDCRc, DDAR_RW),
 804	CD(Ser1SDLCTr, 0),
 805	CD(Ser1SDLCRc, DDAR_RW),
 806	CD(Ser1UARTTr, 0),
 807	CD(Ser1UARTRc, DDAR_RW),
 808	CD(Ser2ICPTr, 0),
 809	CD(Ser2ICPRc, DDAR_RW),
 810	CD(Ser3UARTTr, 0),
 811	CD(Ser3UARTRc, DDAR_RW),
 812	CD(Ser4MCP0Tr, 0),
 813	CD(Ser4MCP0Rc, DDAR_RW),
 814	CD(Ser4MCP1Tr, 0),
 815	CD(Ser4MCP1Rc, DDAR_RW),
 816	CD(Ser4SSPTr, 0),
 817	CD(Ser4SSPRc, DDAR_RW),
 818};
 819
 820static const struct dma_slave_map sa11x0_dma_map[] = {
 821	{ "sa11x0-ir", "tx", "Ser2ICPTr" },
 822	{ "sa11x0-ir", "rx", "Ser2ICPRc" },
 823	{ "sa11x0-ssp", "tx", "Ser4SSPTr" },
 824	{ "sa11x0-ssp", "rx", "Ser4SSPRc" },
 825};
 826
 827static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
 828{
 829	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 830	const char *p = param;
 831
 832	return !strcmp(c->name, p);
 833}
 834
 835static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
 836	struct device *dev)
 837{
 838	unsigned i;
 839
 840	INIT_LIST_HEAD(&dmadev->channels);
 841	dmadev->dev = dev;
 842	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
 843	dmadev->device_config = sa11x0_dma_device_config;
 844	dmadev->device_pause = sa11x0_dma_device_pause;
 845	dmadev->device_resume = sa11x0_dma_device_resume;
 846	dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
 847	dmadev->device_tx_status = sa11x0_dma_tx_status;
 848	dmadev->device_issue_pending = sa11x0_dma_issue_pending;
 849
 850	for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
 851		struct sa11x0_dma_chan *c;
 852
 853		c = kzalloc(sizeof(*c), GFP_KERNEL);
 854		if (!c) {
 855			dev_err(dev, "no memory for channel %u\n", i);
 856			return -ENOMEM;
 857		}
 858
 859		c->status = DMA_IN_PROGRESS;
 860		c->ddar = chan_desc[i].ddar;
 861		c->name = chan_desc[i].name;
 862		INIT_LIST_HEAD(&c->node);
 863
 864		c->vc.desc_free = sa11x0_dma_free_desc;
 865		vchan_init(&c->vc, dmadev);
 866	}
 867
 868	return dma_async_device_register(dmadev);
 869}
 870
 871static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
 872	void *data)
 873{
 874	int irq = platform_get_irq(pdev, nr);
 875
 876	if (irq <= 0)
 877		return -ENXIO;
 878
 879	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
 880}
 881
 882static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
 883	void *data)
 884{
 885	int irq = platform_get_irq(pdev, nr);
 886	if (irq > 0)
 887		free_irq(irq, data);
 888}
 889
 890static void sa11x0_dma_free_channels(struct dma_device *dmadev)
 891{
 892	struct sa11x0_dma_chan *c, *cn;
 893
 894	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
 895		list_del(&c->vc.chan.device_node);
 896		tasklet_kill(&c->vc.task);
 897		kfree(c);
 898	}
 899}
 900
 901static int sa11x0_dma_probe(struct platform_device *pdev)
 902{
 903	struct sa11x0_dma_dev *d;
 904	struct resource *res;
 905	unsigned i;
 906	int ret;
 907
 908	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 909	if (!res)
 910		return -ENXIO;
 911
 912	d = kzalloc(sizeof(*d), GFP_KERNEL);
 913	if (!d) {
 914		ret = -ENOMEM;
 915		goto err_alloc;
 916	}
 917
 918	spin_lock_init(&d->lock);
 919	INIT_LIST_HEAD(&d->chan_pending);
 920
 921	d->slave.filter.fn = sa11x0_dma_filter_fn;
 922	d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
 923	d->slave.filter.map = sa11x0_dma_map;
 924
 925	d->base = ioremap(res->start, resource_size(res));
 926	if (!d->base) {
 927		ret = -ENOMEM;
 928		goto err_ioremap;
 929	}
 930
 931	tasklet_setup(&d->task, sa11x0_dma_tasklet);
 932
 933	for (i = 0; i < NR_PHY_CHAN; i++) {
 934		struct sa11x0_dma_phy *p = &d->phy[i];
 935
 936		p->dev = d;
 937		p->num = i;
 938		p->base = d->base + i * DMA_SIZE;
 939		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
 940			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
 941			p->base + DMA_DCSR_C);
 942		writel_relaxed(0, p->base + DMA_DDAR);
 943
 944		ret = sa11x0_dma_request_irq(pdev, i, p);
 945		if (ret) {
 946			while (i) {
 947				i--;
 948				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 949			}
 950			goto err_irq;
 951		}
 952	}
 953
 954	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
 955	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
 956	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
 957	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
 958	d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 959	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 960	d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
 961				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
 962	d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
 963				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
 964	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
 965	if (ret) {
 966		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
 967			ret);
 968		goto err_slave_reg;
 969	}
 970
 971	platform_set_drvdata(pdev, d);
 972	return 0;
 973
 974 err_slave_reg:
 975	sa11x0_dma_free_channels(&d->slave);
 976	for (i = 0; i < NR_PHY_CHAN; i++)
 977		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 978 err_irq:
 979	tasklet_kill(&d->task);
 980	iounmap(d->base);
 981 err_ioremap:
 982	kfree(d);
 983 err_alloc:
 984	return ret;
 985}
 986
 987static void sa11x0_dma_remove(struct platform_device *pdev)
 988{
 989	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
 990	unsigned pch;
 991
 992	dma_async_device_unregister(&d->slave);
 993
 994	sa11x0_dma_free_channels(&d->slave);
 995	for (pch = 0; pch < NR_PHY_CHAN; pch++)
 996		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
 997	tasklet_kill(&d->task);
 998	iounmap(d->base);
 999	kfree(d);
1000}
1001
1002static __maybe_unused int sa11x0_dma_suspend(struct device *dev)
1003{
1004	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
1005	unsigned pch;
1006
1007	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
1008		struct sa11x0_dma_phy *p = &d->phy[pch];
1009		u32 dcsr, saved_dcsr;
1010
1011		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1012		if (dcsr & DCSR_RUN) {
1013			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
1014			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1015		}
1016
1017		saved_dcsr &= DCSR_RUN | DCSR_IE;
1018		if (dcsr & DCSR_BIU) {
1019			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
1020			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
1021			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
1022			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
1023			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
1024				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
1025		} else {
1026			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
1027			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
1028			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
1029			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
1030			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
1031		}
1032		p->dcsr = saved_dcsr;
1033
1034		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
1035	}
1036
1037	return 0;
1038}
1039
1040static __maybe_unused int sa11x0_dma_resume(struct device *dev)
1041{
1042	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
1043	unsigned pch;
1044
1045	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
1046		struct sa11x0_dma_phy *p = &d->phy[pch];
1047		struct sa11x0_dma_desc *txd = NULL;
1048		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1049
1050		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
1051
1052		if (p->txd_done)
1053			txd = p->txd_done;
1054		else if (p->txd_load)
1055			txd = p->txd_load;
1056
1057		if (!txd)
1058			continue;
1059
1060		writel_relaxed(txd->ddar, p->base + DMA_DDAR);
1061
1062		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
1063		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
1064		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
1065		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
1066		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
1067	}
1068
1069	return 0;
1070}
1071
1072static const struct dev_pm_ops sa11x0_dma_pm_ops = {
1073	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sa11x0_dma_suspend, sa11x0_dma_resume)
1074};
1075
1076static struct platform_driver sa11x0_dma_driver = {
1077	.driver = {
1078		.name	= "sa11x0-dma",
1079		.pm	= &sa11x0_dma_pm_ops,
1080	},
1081	.probe		= sa11x0_dma_probe,
1082	.remove_new	= sa11x0_dma_remove,
1083};
1084
1085static int __init sa11x0_dma_init(void)
1086{
1087	return platform_driver_register(&sa11x0_dma_driver);
1088}
1089subsys_initcall(sa11x0_dma_init);
1090
1091static void __exit sa11x0_dma_exit(void)
1092{
1093	platform_driver_unregister(&sa11x0_dma_driver);
1094}
1095module_exit(sa11x0_dma_exit);
1096
1097MODULE_AUTHOR("Russell King");
1098MODULE_DESCRIPTION("SA-11x0 DMA driver");
1099MODULE_LICENSE("GPL v2");
1100MODULE_ALIAS("platform:sa11x0-dma");
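
As a usage illustration, the sketch below shows how a client driver might obtain one of these channels through the sa11x0_dma_map table above and drive it with the generic dmaengine calls that land in sa11x0_dma_device_config(), sa11x0_dma_prep_slave_sg() and sa11x0_dma_issue_pending(). This is only a minimal sketch, not code from this driver: the function name and its parameters are invented for the example, the device is assumed to be the "sa11x0-ssp" platform device so the "tx" lookup resolves to the "Ser4SSPTr" channel, and the buffer is assumed to be DMA-mapped already.

/*
 * Illustrative client-side sketch only; not part of sa11x0-dma.c.
 * Assumes dev is the "sa11x0-ssp" platform device (so dma_request_chan()
 * resolves "tx" to "Ser4SSPTr" via sa11x0_dma_map[]) and that buf has
 * already been mapped for DMA.
 */
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_ssp_start_tx(struct device *dev, dma_addr_t buf,
				size_t len, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo,
		/* sa11x0_dma_device_config() only accepts 1- or 2-byte widths */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		/* ...and bursts of 4 or 8 (8 sets DDAR_BS) */
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	struct scatterlist sg;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	/* Ser4SSPTr is a mem-to-dev channel, so the direction must match */
	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

The transfer then runs through the double-buffered DBSA/DBSB load path shown in sa11x0_dma_start_sg() above.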
The same driver as of Linux v3.5.6:
 
   1/*
   2 * SA11x0 DMAengine support
   3 *
   4 * Copyright (C) 2012 Russell King
   5 *   Derived in part from arch/arm/mach-sa1100/dma.c,
   6 *   Copyright (C) 2000, 2001 by Nicolas Pitre
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 */
  12#include <linux/sched.h>
  13#include <linux/device.h>
  14#include <linux/dmaengine.h>
  15#include <linux/init.h>
  16#include <linux/interrupt.h>
  17#include <linux/kernel.h>
  18#include <linux/module.h>
  19#include <linux/platform_device.h>
  20#include <linux/sa11x0-dma.h>
  21#include <linux/slab.h>
  22#include <linux/spinlock.h>
  23
  24#define NR_PHY_CHAN	6
  25#define DMA_ALIGN	3
  26#define DMA_MAX_SIZE	0x1fff
  27#define DMA_CHUNK_SIZE	0x1000
  28
  29#define DMA_DDAR	0x00
  30#define DMA_DCSR_S	0x04
  31#define DMA_DCSR_C	0x08
  32#define DMA_DCSR_R	0x0c
  33#define DMA_DBSA	0x10
  34#define DMA_DBTA	0x14
  35#define DMA_DBSB	0x18
  36#define DMA_DBTB	0x1c
  37#define DMA_SIZE	0x20
  38
  39#define DCSR_RUN	(1 << 0)
  40#define DCSR_IE		(1 << 1)
  41#define DCSR_ERROR	(1 << 2)
  42#define DCSR_DONEA	(1 << 3)
  43#define DCSR_STRTA	(1 << 4)
  44#define DCSR_DONEB	(1 << 5)
  45#define DCSR_STRTB	(1 << 6)
  46#define DCSR_BIU	(1 << 7)
  47
  48#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
  49#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
  50#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
  51#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
  52#define DDAR_Ser0UDCTr	(0x0 << 4)
  53#define DDAR_Ser0UDCRc	(0x1 << 4)
  54#define DDAR_Ser1SDLCTr	(0x2 << 4)
  55#define DDAR_Ser1SDLCRc	(0x3 << 4)
  56#define DDAR_Ser1UARTTr	(0x4 << 4)
  57#define DDAR_Ser1UARTRc	(0x5 << 4)
  58#define DDAR_Ser2ICPTr	(0x6 << 4)
  59#define DDAR_Ser2ICPRc	(0x7 << 4)
  60#define DDAR_Ser3UARTTr	(0x8 << 4)
  61#define DDAR_Ser3UARTRc	(0x9 << 4)
  62#define DDAR_Ser4MCP0Tr	(0xa << 4)
  63#define DDAR_Ser4MCP0Rc	(0xb << 4)
  64#define DDAR_Ser4MCP1Tr	(0xc << 4)
  65#define DDAR_Ser4MCP1Rc	(0xd << 4)
  66#define DDAR_Ser4SSPTr	(0xe << 4)
  67#define DDAR_Ser4SSPRc	(0xf << 4)
  68
  69struct sa11x0_dma_sg {
  70	u32			addr;
  71	u32			len;
  72};
  73
  74struct sa11x0_dma_desc {
  75	struct dma_async_tx_descriptor tx;
  76	u32			ddar;
  77	size_t			size;
  78
  79	/* maybe protected by c->lock */
  80	struct list_head	node;
  81	unsigned		sglen;
  82	struct sa11x0_dma_sg	sg[0];
  83};
  84
  85struct sa11x0_dma_phy;
  86
  87struct sa11x0_dma_chan {
  88	struct dma_chan		chan;
  89	spinlock_t		lock;
  90	dma_cookie_t		lc;
  91
  92	/* protected by c->lock */
  93	struct sa11x0_dma_phy	*phy;
  94	enum dma_status		status;
  95	struct list_head	desc_submitted;
  96	struct list_head	desc_issued;
  97
  98	/* protected by d->lock */
  99	struct list_head	node;
 100
 101	u32			ddar;
 102	const char		*name;
 103};
 104
 105struct sa11x0_dma_phy {
 106	void __iomem		*base;
 107	struct sa11x0_dma_dev	*dev;
 108	unsigned		num;
 109
 110	struct sa11x0_dma_chan	*vchan;
 111
 112	/* Protected by c->lock */
 113	unsigned		sg_load;
 114	struct sa11x0_dma_desc	*txd_load;
 115	unsigned		sg_done;
 116	struct sa11x0_dma_desc	*txd_done;
 117#ifdef CONFIG_PM_SLEEP
 118	u32			dbs[2];
 119	u32			dbt[2];
 120	u32			dcsr;
 121#endif
 122};
 123
 124struct sa11x0_dma_dev {
 125	struct dma_device	slave;
 126	void __iomem		*base;
 127	spinlock_t		lock;
 128	struct tasklet_struct	task;
 129	struct list_head	chan_pending;
 130	struct list_head	desc_complete;
 131	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
 132};
 133
 134static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
 135{
 136	return container_of(chan, struct sa11x0_dma_chan, chan);
 137}
 138
 139static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
 140{
 141	return container_of(dmadev, struct sa11x0_dma_dev, slave);
 142}
 143
 144static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
 145{
 146	return container_of(tx, struct sa11x0_dma_desc, tx);
 147}
 148
 149static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
 150{
 151	if (list_empty(&c->desc_issued))
 152		return NULL;
 153
 154	return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
 155}
 156
 157static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
 158{
 159	list_del(&txd->node);
 160	p->txd_load = txd;
 161	p->sg_load = 0;
 162
 163	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
 164		p->num, txd, txd->tx.cookie, txd->ddar);
 165}
 166
 167static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
 168	struct sa11x0_dma_chan *c)
 169{
 170	struct sa11x0_dma_desc *txd = p->txd_load;
 171	struct sa11x0_dma_sg *sg;
 172	void __iomem *base = p->base;
 173	unsigned dbsx, dbtx;
 174	u32 dcsr;
 175
 176	if (!txd)
 177		return;
 178
 179	dcsr = readl_relaxed(base + DMA_DCSR_R);
 180
 181	/* Don't try to load the next transfer if both buffers are started */
 182	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
 183		return;
 184
 185	if (p->sg_load == txd->sglen) {
 186		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
 187
 188		/*
 189		 * We have reached the end of the current descriptor.
 190		 * Peek at the next descriptor, and if compatible with
 191		 * the current, start processing it.
 192		 */
 193		if (txn && txn->ddar == txd->ddar) {
 194			txd = txn;
 195			sa11x0_dma_start_desc(p, txn);
 196		} else {
 197			p->txd_load = NULL;
 198			return;
 199		}
 200	}
 201
 202	sg = &txd->sg[p->sg_load++];
 203
 204	/* Select buffer to load according to channel status */
 205	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
 206	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
 207		dbsx = DMA_DBSA;
 208		dbtx = DMA_DBTA;
 209		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
 210	} else {
 211		dbsx = DMA_DBSB;
 212		dbtx = DMA_DBTB;
 213		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
 214	}
 215
 216	writel_relaxed(sg->addr, base + dbsx);
 217	writel_relaxed(sg->len, base + dbtx);
 218	writel(dcsr, base + DMA_DCSR_S);
 219
 220	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
 221		p->num, dcsr,
 222		'A' + (dbsx == DMA_DBSB), sg->addr,
 223		'A' + (dbtx == DMA_DBTB), sg->len);
 224}
 225
 226static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
 227	struct sa11x0_dma_chan *c)
 228{
 229	struct sa11x0_dma_desc *txd = p->txd_done;
 230
 231	if (++p->sg_done == txd->sglen) {
 232		struct sa11x0_dma_dev *d = p->dev;
 233
 234		dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
 235			p->num, p->txd_done, p->txd_done->tx.cookie);
 236
 237		c->lc = txd->tx.cookie;
 238
 239		spin_lock(&d->lock);
 240		list_add_tail(&txd->node, &d->desc_complete);
 241		spin_unlock(&d->lock);
 242
 243		p->sg_done = 0;
 244		p->txd_done = p->txd_load;
 245
 246		tasklet_schedule(&d->task);
 247	}
 248
 249	sa11x0_dma_start_sg(p, c);
 250}
 251
 252static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
 253{
 254	struct sa11x0_dma_phy *p = dev_id;
 255	struct sa11x0_dma_dev *d = p->dev;
 256	struct sa11x0_dma_chan *c;
 257	u32 dcsr;
 258
 259	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
 260	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
 261		return IRQ_NONE;
 262
 263	/* Clear reported status bits */
 264	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
 265		p->base + DMA_DCSR_C);
 266
 267	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
 268
 269	if (dcsr & DCSR_ERROR) {
 270		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
 271			p->num, dcsr,
 272			readl_relaxed(p->base + DMA_DDAR),
 273			readl_relaxed(p->base + DMA_DBSA),
 274			readl_relaxed(p->base + DMA_DBTA),
 275			readl_relaxed(p->base + DMA_DBSB),
 276			readl_relaxed(p->base + DMA_DBTB));
 277	}
 278
 279	c = p->vchan;
 280	if (c) {
 281		unsigned long flags;
 282
 283		spin_lock_irqsave(&c->lock, flags);
 284		/*
 285		 * Now that we're holding the lock, check that the vchan
 286		 * really is associated with this pchan before touching the
 287		 * hardware.  This should always succeed, because we won't
 288		 * change p->vchan or c->phy while the channel is actively
 289		 * transferring.
 290		 */
 291		if (c->phy == p) {
 292			if (dcsr & DCSR_DONEA)
 293				sa11x0_dma_complete(p, c);
 294			if (dcsr & DCSR_DONEB)
 295				sa11x0_dma_complete(p, c);
 296		}
 297		spin_unlock_irqrestore(&c->lock, flags);
 298	}
 299
 300	return IRQ_HANDLED;
 301}
 302
 303static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
 304{
 305	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
 306
 307	/* If the issued list is empty, we have no further txds to process */
 308	if (txd) {
 309		struct sa11x0_dma_phy *p = c->phy;
 310
 311		sa11x0_dma_start_desc(p, txd);
 312		p->txd_done = txd;
 313		p->sg_done = 0;
 314
 315		/* The channel should not have any transfers started */
 316		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
 317				      (DCSR_STRTA | DCSR_STRTB));
 318
 319		/* Clear the run and start bits before changing DDAR */
 320		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
 321			       p->base + DMA_DCSR_C);
 322		writel_relaxed(txd->ddar, p->base + DMA_DDAR);
 323
 324		/* Try to start both buffers */
 325		sa11x0_dma_start_sg(p, c);
 326		sa11x0_dma_start_sg(p, c);
 327	}
 328}
 329
 330static void sa11x0_dma_tasklet(unsigned long arg)
 331{
 332	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
 333	struct sa11x0_dma_phy *p;
 334	struct sa11x0_dma_chan *c;
 335	struct sa11x0_dma_desc *txd, *txn;
 336	LIST_HEAD(head);
 337	unsigned pch, pch_alloc = 0;
 338
 339	dev_dbg(d->slave.dev, "tasklet enter\n");
 340
 341	/* Get the completed tx descriptors */
 342	spin_lock_irq(&d->lock);
 343	list_splice_init(&d->desc_complete, &head);
 344	spin_unlock_irq(&d->lock);
 345
 346	list_for_each_entry(txd, &head, node) {
 347		c = to_sa11x0_dma_chan(txd->tx.chan);
 348
 349		dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
 350			c, txd, txd->tx.cookie);
 351
 352		spin_lock_irq(&c->lock);
 353		p = c->phy;
 354		if (p) {
 355			if (!p->txd_done)
 356				sa11x0_dma_start_txd(c);
 357			if (!p->txd_done) {
 358				/* No current txd associated with this channel */
 359				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
 360
 361				/* Mark this channel free */
 362				c->phy = NULL;
 363				p->vchan = NULL;
 364			}
 365		}
 366		spin_unlock_irq(&c->lock);
 367	}
 368
 369	spin_lock_irq(&d->lock);
 370	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
 371		p = &d->phy[pch];
 372
 373		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
 374			c = list_first_entry(&d->chan_pending,
 375				struct sa11x0_dma_chan, node);
 376			list_del_init(&c->node);
 377
 378			pch_alloc |= 1 << pch;
 379
 380			/* Mark this channel allocated */
 381			p->vchan = c;
 382
 383			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
 384		}
 385	}
 386	spin_unlock_irq(&d->lock);
 387
 388	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
 389		if (pch_alloc & (1 << pch)) {
 390			p = &d->phy[pch];
 391			c = p->vchan;
 392
 393			spin_lock_irq(&c->lock);
 394			c->phy = p;
 395
 396			sa11x0_dma_start_txd(c);
 397			spin_unlock_irq(&c->lock);
 398		}
 399	}
 400
 401	/* Now free the completed tx descriptor, and call their callbacks */
 402	list_for_each_entry_safe(txd, txn, &head, node) {
 403		dma_async_tx_callback callback = txd->tx.callback;
 404		void *callback_param = txd->tx.callback_param;
 405
 406		dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
 407			txd, txd->tx.cookie);
 408
 409		kfree(txd);
 410
 411		if (callback)
 412			callback(callback_param);
 413	}
 414
 415	dev_dbg(d->slave.dev, "tasklet exit\n");
 416}
 417
 418
 419static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
 420{
 421	struct sa11x0_dma_desc *txd, *txn;
 422
 423	list_for_each_entry_safe(txd, txn, head, node) {
 424		dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
 425		kfree(txd);
 426	}
 427}
 428
 429static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
 430{
 431	return 0;
 432}
 433
 434static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
 435{
 436	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 437	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 438	unsigned long flags;
 439	LIST_HEAD(head);
 440
 441	spin_lock_irqsave(&c->lock, flags);
 442	spin_lock(&d->lock);
 443	list_del_init(&c->node);
 444	spin_unlock(&d->lock);
 445
 446	list_splice_tail_init(&c->desc_submitted, &head);
 447	list_splice_tail_init(&c->desc_issued, &head);
 448	spin_unlock_irqrestore(&c->lock, flags);
 449
 450	sa11x0_dma_desc_free(d, &head);
 451}
 452
 453static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
 454{
 455	unsigned reg;
 456	u32 dcsr;
 457
 458	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
 459
 460	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
 461	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
 462		reg = DMA_DBSA;
 463	else
 464		reg = DMA_DBSB;
 465
 466	return readl_relaxed(p->base + reg);
 467}
 468
 469static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 470	dma_cookie_t cookie, struct dma_tx_state *state)
 471{
 472	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 473	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 474	struct sa11x0_dma_phy *p;
 475	struct sa11x0_dma_desc *txd;
 476	dma_cookie_t last_used, last_complete;
 477	unsigned long flags;
 478	enum dma_status ret;
 479	size_t bytes = 0;
 480
 481	last_used = c->chan.cookie;
 482	last_complete = c->lc;
 483
 484	ret = dma_async_is_complete(cookie, last_complete, last_used);
 485	if (ret == DMA_SUCCESS) {
 486		dma_set_tx_state(state, last_complete, last_used, 0);
 487		return ret;
 488	}
 489
 490	spin_lock_irqsave(&c->lock, flags);
 491	p = c->phy;
 492	ret = c->status;
 493	if (p) {
 494		dma_addr_t addr = sa11x0_dma_pos(p);
 495
 496		dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
 497
 498		txd = p->txd_done;
 499		if (txd) {
 500			unsigned i;
 501
 502			for (i = 0; i < txd->sglen; i++) {
 503				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
 504					i, txd->sg[i].addr, txd->sg[i].len);
 505				if (addr >= txd->sg[i].addr &&
 506				    addr < txd->sg[i].addr + txd->sg[i].len) {
 507					unsigned len;
 508
 509					len = txd->sg[i].len -
 510						(addr - txd->sg[i].addr);
 511					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
 512						i, len);
 513					bytes += len;
 514					i++;
 515					break;
 516				}
 517			}
 518			for (; i < txd->sglen; i++) {
 519				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
 520					i, txd->sg[i].addr, txd->sg[i].len);
 521				bytes += txd->sg[i].len;
 522			}
 523		}
 524		if (txd != p->txd_load && p->txd_load)
 525			bytes += p->txd_load->size;
 526	}
 527	list_for_each_entry(txd, &c->desc_issued, node) {
 528		bytes += txd->size;
 529	}
 530	spin_unlock_irqrestore(&c->lock, flags);
 531
 532	dma_set_tx_state(state, last_complete, last_used, bytes);
 533
 534	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
 535
 536	return ret;
 537}
 538
 539/*
 540 * Move pending txds to the issued list, and re-init pending list.
 541 * If not already pending, add this channel to the list of pending
 542 * channels and trigger the tasklet to run.
 543 */
 544static void sa11x0_dma_issue_pending(struct dma_chan *chan)
 545{
 546	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 547	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 548	unsigned long flags;
 549
 550	spin_lock_irqsave(&c->lock, flags);
 551	list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
 552	if (!list_empty(&c->desc_issued)) {
 553		spin_lock(&d->lock);
 554		if (!c->phy && list_empty(&c->node)) {
 555			list_add_tail(&c->node, &d->chan_pending);
 556			tasklet_schedule(&d->task);
 557			dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
 558		}
 559		spin_unlock(&d->lock);
 560	} else
 561		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
 562	spin_unlock_irqrestore(&c->lock, flags);
 563}
 564
 565static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 566{
 567	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
 568	struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
 569	unsigned long flags;
 570
 571	spin_lock_irqsave(&c->lock, flags);
 572	c->chan.cookie += 1;
 573	if (c->chan.cookie < 0)
 574		c->chan.cookie = 1;
 575	txd->tx.cookie = c->chan.cookie;
 576
 577	list_add_tail(&txd->node, &c->desc_submitted);
 578	spin_unlock_irqrestore(&c->lock, flags);
 579
 580	dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
 581		c, txd, txd->tx.cookie);
 582
 583	return txd->tx.cookie;
 584}
 585
 586static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 587	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
 588	enum dma_transfer_direction dir, unsigned long flags, void *context)
 589{
 590	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 591	struct sa11x0_dma_desc *txd;
 592	struct scatterlist *sgent;
 593	unsigned i, j = sglen;
 594	size_t size = 0;
 595
 596	/* SA11x0 channels can only operate in their native direction */
 597	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
 598		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
 599			c, c->ddar, dir);
 600		return NULL;
 601	}
 602
 603	/* Do not allow zero-sized txds */
 604	if (sglen == 0)
 605		return NULL;
 606
 607	for_each_sg(sg, sgent, sglen, i) {
 608		dma_addr_t addr = sg_dma_address(sgent);
 609		unsigned int len = sg_dma_len(sgent);
 610
 611		if (len > DMA_MAX_SIZE)
 612			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
 613		if (addr & DMA_ALIGN) {
 614			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
 615				c, addr);
 616			return NULL;
 617		}
 618	}
 619
 620	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
 621	if (!txd) {
 622		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
 623		return NULL;
 624	}
 625
 626	j = 0;
 627	for_each_sg(sg, sgent, sglen, i) {
 628		dma_addr_t addr = sg_dma_address(sgent);
 629		unsigned len = sg_dma_len(sgent);
 630
 631		size += len;
 632
 633		do {
 634			unsigned tlen = len;
 635
 636			/*
 637			 * Check whether the transfer will fit.  If not, try
 638			 * to split the transfer up such that we end up with
 639			 * equal chunks - but make sure that we preserve the
 640			 * alignment.  This avoids small segments.
 641			 */
 642			if (tlen > DMA_MAX_SIZE) {
 643				unsigned mult = DIV_ROUND_UP(tlen,
 644					DMA_MAX_SIZE & ~DMA_ALIGN);
 645
 646				tlen = (tlen / mult) & ~DMA_ALIGN;
 647			}
 648
 649			txd->sg[j].addr = addr;
 650			txd->sg[j].len = tlen;
 651
 652			addr += tlen;
 653			len -= tlen;
 654			j++;
 655		} while (len);
 656	}
 657
 658	dma_async_tx_descriptor_init(&txd->tx, &c->chan);
 659	txd->tx.flags = flags;
 660	txd->tx.tx_submit = sa11x0_dma_tx_submit;
 661	txd->ddar = c->ddar;
 662	txd->size = size;
 663	txd->sglen = j;
 664
 665	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
 666		c, txd, txd->size, txd->sglen);
 667
 668	return &txd->tx;
 669}
 670
 671static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
 672{
 673	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
 674	dma_addr_t addr;
 675	enum dma_slave_buswidth width;
 676	u32 maxburst;
 677
 678	if (ddar & DDAR_RW) {
 679		addr = cfg->src_addr;
 680		width = cfg->src_addr_width;
 681		maxburst = cfg->src_maxburst;
 682	} else {
 683		addr = cfg->dst_addr;
 684		width = cfg->dst_addr_width;
 685		maxburst = cfg->dst_maxburst;
 686	}
 687
 688	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
 689	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
 690	    (maxburst != 4 && maxburst != 8))
 691		return -EINVAL;
 692
 693	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
 694		ddar |= DDAR_DW;
 695	if (maxburst == 8)
 696		ddar |= DDAR_BS;
 697
 698	dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
 699		c, addr, width, maxburst);
 700
 701	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
 702
 703	return 0;
 704}
 705
 706static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 707	unsigned long arg)
 708{
 709	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 710	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 711	struct sa11x0_dma_phy *p;
 712	LIST_HEAD(head);
 713	unsigned long flags;
 714	int ret;
 715
 716	switch (cmd) {
 717	case DMA_SLAVE_CONFIG:
 718		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
 719
 720	case DMA_TERMINATE_ALL:
 721		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
 722		/* Clear the tx descriptor lists */
 723		spin_lock_irqsave(&c->lock, flags);
 724		list_splice_tail_init(&c->desc_submitted, &head);
 725		list_splice_tail_init(&c->desc_issued, &head);
 726
 727		p = c->phy;
 728		if (p) {
 729			struct sa11x0_dma_desc *txd, *txn;
 730
 731			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
 732			/* vchan is assigned to a pchan - stop the channel */
 733			writel(DCSR_RUN | DCSR_IE |
 734				DCSR_STRTA | DCSR_DONEA |
 735				DCSR_STRTB | DCSR_DONEB,
 736				p->base + DMA_DCSR_C);
 737
 738			list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
 739				if (txd->tx.chan == &c->chan)
 740					list_move(&txd->node, &head);
 741
 742			if (p->txd_load) {
 743				if (p->txd_load != p->txd_done)
 744					list_add_tail(&p->txd_load->node, &head);
 745				p->txd_load = NULL;
 746			}
 747			if (p->txd_done) {
 748				list_add_tail(&p->txd_done->node, &head);
 749				p->txd_done = NULL;
 750			}
 751			c->phy = NULL;
 752			spin_lock(&d->lock);
 753			p->vchan = NULL;
 754			spin_unlock(&d->lock);
 755			tasklet_schedule(&d->task);
 756		}
 757		spin_unlock_irqrestore(&c->lock, flags);
 758		sa11x0_dma_desc_free(d, &head);
 759		ret = 0;
 760		break;
 761
 762	case DMA_PAUSE:
 763		dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
 764		spin_lock_irqsave(&c->lock, flags);
 765		if (c->status == DMA_IN_PROGRESS) {
 766			c->status = DMA_PAUSED;
 767
 768			p = c->phy;
 769			if (p) {
 770				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
 771			} else {
 772				spin_lock(&d->lock);
 773				list_del_init(&c->node);
 774				spin_unlock(&d->lock);
 775			}
 776		}
 777		spin_unlock_irqrestore(&c->lock, flags);
 778		ret = 0;
 779		break;
 780
 781	case DMA_RESUME:
 782		dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
 783		spin_lock_irqsave(&c->lock, flags);
 784		if (c->status == DMA_PAUSED) {
 785			c->status = DMA_IN_PROGRESS;
 786
 787			p = c->phy;
 788			if (p) {
 789				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
 790			} else if (!list_empty(&c->desc_issued)) {
 791				spin_lock(&d->lock);
 792				list_add_tail(&c->node, &d->chan_pending);
 793				spin_unlock(&d->lock);
 794			}
 795		}
 796		spin_unlock_irqrestore(&c->lock, flags);
 797		ret = 0;
 798		break;
 799
 800	default:
 801		ret = -ENXIO;
 802		break;
 803	}
 804
 805	return ret;
 806}
 807
 808struct sa11x0_dma_channel_desc {
 809	u32 ddar;
 810	const char *name;
 811};
 812
 813#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
 814static const struct sa11x0_dma_channel_desc chan_desc[] = {
 815	CD(Ser0UDCTr, 0),
 816	CD(Ser0UDCRc, DDAR_RW),
 817	CD(Ser1SDLCTr, 0),
 818	CD(Ser1SDLCRc, DDAR_RW),
 819	CD(Ser1UARTTr, 0),
 820	CD(Ser1UARTRc, DDAR_RW),
 821	CD(Ser2ICPTr, 0),
 822	CD(Ser2ICPRc, DDAR_RW),
 823	CD(Ser3UARTTr, 0),
 824	CD(Ser3UARTRc, DDAR_RW),
 825	CD(Ser4MCP0Tr, 0),
 826	CD(Ser4MCP0Rc, DDAR_RW),
 827	CD(Ser4MCP1Tr, 0),
 828	CD(Ser4MCP1Rc, DDAR_RW),
 829	CD(Ser4SSPTr, 0),
 830	CD(Ser4SSPRc, DDAR_RW),
 831};
 832
 833static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
 834	struct device *dev)
 835{
 836	unsigned i;
 837
 838	dmadev->chancnt = ARRAY_SIZE(chan_desc);
 839	INIT_LIST_HEAD(&dmadev->channels);
 840	dmadev->dev = dev;
 841	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
 842	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
 843	dmadev->device_control = sa11x0_dma_control;
 844	dmadev->device_tx_status = sa11x0_dma_tx_status;
 845	dmadev->device_issue_pending = sa11x0_dma_issue_pending;
 846
 847	for (i = 0; i < dmadev->chancnt; i++) {
 848		struct sa11x0_dma_chan *c;
 849
 850		c = kzalloc(sizeof(*c), GFP_KERNEL);
 851		if (!c) {
 852			dev_err(dev, "no memory for channel %u\n", i);
 853			return -ENOMEM;
 854		}
 855
 856		c->chan.device = dmadev;
 857		c->status = DMA_IN_PROGRESS;
 858		c->ddar = chan_desc[i].ddar;
 859		c->name = chan_desc[i].name;
 860		spin_lock_init(&c->lock);
 861		INIT_LIST_HEAD(&c->desc_submitted);
 862		INIT_LIST_HEAD(&c->desc_issued);
 863		INIT_LIST_HEAD(&c->node);
 864		list_add_tail(&c->chan.device_node, &dmadev->channels);
 865	}
 866
 867	return dma_async_device_register(dmadev);
 868}
 869
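/*
 * Each physical channel has its own platform IRQ: look it up by index and
 * claim it with the per-channel state as the handler argument.
 */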
 870static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
 871	void *data)
 872{
 873	int irq = platform_get_irq(pdev, nr);
 874
 875	if (irq <= 0)
 876		return -ENXIO;
 877
 878	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
 879}
 880
 881static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
 882	void *data)
 883{
 884	int irq = platform_get_irq(pdev, nr);
 885	if (irq > 0)
 886		free_irq(irq, data);
 887}
 888
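/*
 * Tear down the virtual channels created by sa11x0_dma_init_dmadev().
 */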
 889static void sa11x0_dma_free_channels(struct dma_device *dmadev)
 890{
 891	struct sa11x0_dma_chan *c, *cn;
 892
 893	list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
 894		list_del(&c->chan.device_node);
 895		kfree(c);
 896	}
 897}
 898
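/*
 * Probe: map the controller registers, reset and claim an IRQ for each of
 * the NR_PHY_CHAN physical channels, then register the slave DMA device.
 */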
 899static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
 900{
 901	struct sa11x0_dma_dev *d;
 902	struct resource *res;
 903	unsigned i;
 904	int ret;
 905
 906	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 907	if (!res)
 908		return -ENXIO;
 909
 910	d = kzalloc(sizeof(*d), GFP_KERNEL);
 911	if (!d) {
 912		ret = -ENOMEM;
 913		goto err_alloc;
 914	}
 915
 916	spin_lock_init(&d->lock);
 917	INIT_LIST_HEAD(&d->chan_pending);
 918	INIT_LIST_HEAD(&d->desc_complete);
 919
 920	d->base = ioremap(res->start, resource_size(res));
 921	if (!d->base) {
 922		ret = -ENOMEM;
 923		goto err_ioremap;
 924	}
 925
 926	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
 927
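	/*
	 * Reset each physical channel (clear all DCSR status bits, zero DDAR)
	 * and claim its interrupt, unwinding any IRQs already requested if
	 * one of the requests fails.
	 */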
 928	for (i = 0; i < NR_PHY_CHAN; i++) {
 929		struct sa11x0_dma_phy *p = &d->phy[i];
 930
 931		p->dev = d;
 932		p->num = i;
 933		p->base = d->base + i * DMA_SIZE;
 934		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
 935			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
 936			p->base + DMA_DCSR_C);
 937		writel_relaxed(0, p->base + DMA_DDAR);
 938
 939		ret = sa11x0_dma_request_irq(pdev, i, p);
 940		if (ret) {
 941			while (i) {
 942				i--;
 943				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 944			}
 945			goto err_irq;
 946		}
 947	}
 948
 949	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
 950	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
 951	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
 952	if (ret) {
 953		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
 954			ret);
 955		goto err_slave_reg;
 956	}
 957
 958	platform_set_drvdata(pdev, d);
 959	return 0;
 960
 961 err_slave_reg:
 962	sa11x0_dma_free_channels(&d->slave);
 963	for (i = 0; i < NR_PHY_CHAN; i++)
 964		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 965 err_irq:
 966	tasklet_kill(&d->task);
 967	iounmap(d->base);
 968 err_ioremap:
 969	kfree(d);
 970 err_alloc:
 971	return ret;
 972}
 973
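/*
 * Remove: unregister from the dmaengine core, then release the channels,
 * IRQs, tasklet and register mapping in the reverse order of probe.
 */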
 974static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
 975{
 976	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
 977	unsigned pch;
 978
 979	dma_async_device_unregister(&d->slave);
 980
 981	sa11x0_dma_free_channels(&d->slave);
 982	for (pch = 0; pch < NR_PHY_CHAN; pch++)
 983		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
 984	tasklet_kill(&d->task);
 985	iounmap(d->base);
 986	kfree(d);
 987
 988	return 0;
 989}
 990
 991#ifdef CONFIG_PM_SLEEP
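/*
 * Stop each physical channel and snapshot enough register state (buffer
 * addresses, transfer counts and selected DCSR bits) to restart any
 * in-flight transfer on resume.
 */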
 992static int sa11x0_dma_suspend(struct device *dev)
 993{
 994	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
 995	unsigned pch;
 996
 997	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
 998		struct sa11x0_dma_phy *p = &d->phy[pch];
 999		u32 dcsr, saved_dcsr;
1000
1001		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1002		if (dcsr & DCSR_RUN) {
1003			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
1004			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1005		}
1006
1007		saved_dcsr &= DCSR_RUN | DCSR_IE;
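		/*
		 * Save the buffers in "current first" order: if DCSR_BIU
		 * reports buffer B as the one in use, store B's address and
		 * count in slot 0 and swap the STRTA/STRTB flags, so that
		 * resume restores the active buffer as buffer A.
		 */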
1008		if (dcsr & DCSR_BIU) {
1009			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
1010			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
1011			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
1012			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
1013			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
1014				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
1015		} else {
1016			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
1017			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
1018			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
1019			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
1020			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
1021		}
1022		p->dcsr = saved_dcsr;
1023
1024		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
1025	}
1026
1027	return 0;
1028}
1029
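/*
 * Re-program each physical channel from the state captured at suspend.
 * Channels with no descriptor in flight are left idle.
 */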
1030static int sa11x0_dma_resume(struct device *dev)
1031{
1032	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
1033	unsigned pch;
1034
1035	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
1036		struct sa11x0_dma_phy *p = &d->phy[pch];
1037		struct sa11x0_dma_desc *txd = NULL;
1038		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1039
1040		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
1041
1042		if (p->txd_done)
1043			txd = p->txd_done;
1044		else if (p->txd_load)
1045			txd = p->txd_load;
1046
1047		if (!txd)
1048			continue;
1049
1050		writel_relaxed(txd->ddar, p->base + DMA_DDAR);
1051
1052		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
1053		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
1054		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
1055		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
1056		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
1057	}
1058
1059	return 0;
1060}
1061#endif
1062
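/*
 * The noirq variants are used for all PM callbacks, presumably so the DMA
 * engine is only quiesced after its client devices have themselves suspended.
 */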
1063static const struct dev_pm_ops sa11x0_dma_pm_ops = {
1064	.suspend_noirq = sa11x0_dma_suspend,
1065	.resume_noirq = sa11x0_dma_resume,
1066	.freeze_noirq = sa11x0_dma_suspend,
1067	.thaw_noirq = sa11x0_dma_resume,
1068	.poweroff_noirq = sa11x0_dma_suspend,
1069	.restore_noirq = sa11x0_dma_resume,
1070};
1071
1072static struct platform_driver sa11x0_dma_driver = {
1073	.driver = {
1074		.name	= "sa11x0-dma",
1075		.owner	= THIS_MODULE,
1076		.pm	= &sa11x0_dma_pm_ops,
1077	},
1078	.probe		= sa11x0_dma_probe,
1079	.remove		= __devexit_p(sa11x0_dma_remove),
1080};
1081
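/*
 * Filter for dma_request_channel(): matches a channel by the name given in
 * chan_desc[].  A hypothetical caller might do something like:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
 */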
1082bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
1083{
1084	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
1085		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
1086		const char *p = param;
1087
1088		return !strcmp(c->name, p);
1089	}
1090	return false;
1091}
1092EXPORT_SYMBOL(sa11x0_dma_filter_fn);
1093
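/*
 * Registered via subsys_initcall() so the DMA engine is available before
 * its client drivers initialise.
 */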
1094static int __init sa11x0_dma_init(void)
1095{
1096	return platform_driver_register(&sa11x0_dma_driver);
1097}
1098subsys_initcall(sa11x0_dma_init);
1099
1100static void __exit sa11x0_dma_exit(void)
1101{
1102	platform_driver_unregister(&sa11x0_dma_driver);
1103}
1104module_exit(sa11x0_dma_exit);
1105
1106MODULE_AUTHOR("Russell King");
1107MODULE_DESCRIPTION("SA-11x0 DMA driver");
1108MODULE_LICENSE("GPL v2");
1109MODULE_ALIAS("platform:sa11x0-dma");