v3.1
   1/*
   2 *
   3 * BRIEF MODULE DESCRIPTION
   4 *      The Descriptor Based DMA channel manager that first appeared
   5 *	on the Au1550.  I started with dma.c, but I think all that is
   6 *	left is this initial comment :-)
   7 *
   8 * Copyright 2004 Embedded Edge, LLC
   9 *	dan@embeddededge.com
  10 *
  11 *  This program is free software; you can redistribute  it and/or modify it
  12 *  under  the terms of  the GNU General  Public License as published by the
  13 *  Free Software Foundation;  either version 2 of the  License, or (at your
  14 *  option) any later version.
  15 *
  16 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
  17 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
  18 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  19 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
  20 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  21 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
  22 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  23 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
  24 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  25 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  26 *
  27 *  You should have received a copy of the  GNU General Public License along
  28 *  with this program; if not, write  to the Free Software Foundation, Inc.,
  29 *  675 Mass Ave, Cambridge, MA 02139, USA.
  30 *
  31 */
  32
  33#include <linux/init.h>
  34#include <linux/kernel.h>
  35#include <linux/slab.h>
  36#include <linux/spinlock.h>
  37#include <linux/interrupt.h>
  38#include <linux/module.h>
  39#include <linux/syscore_ops.h>
  40#include <asm/mach-au1x00/au1000.h>
  41#include <asm/mach-au1x00/au1xxx_dbdma.h>
  42
  43#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
  44
  45/*
  46 * The Descriptor Based DMA supports up to 16 channels.
  47 *
  48 * There are 32 devices defined. We keep an internal structure
  49 * of devices using these channels, along with additional
  50 * information.
  51 *
  52 * We allocate the descriptors and allow access to them through various
  53 * functions.  The drivers allocate the data buffers and assign them
  54 * to the descriptors.
  55 */
  56static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
  57
  58/* I couldn't find a macro that did this... */
  59#define ALIGN_ADDR(x, a)	((((u32)(x)) + (a-1)) & ~(a-1))
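/*
 * Editor's note (not in the original source): ALIGN_ADDR rounds x up
 * to the next multiple of a, where a must be a power of two, e.g.
 * ALIGN_ADDR(0x1004, 32) == (0x1004 + 0x1f) & ~0x1f == 0x1020.
 */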
  60
  61static dbdma_global_t *dbdma_gptr =
  62			(dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
  63static int dbdma_initialized;
  64
  65static dbdev_tab_t dbdev_tab[] = {
  66#ifdef CONFIG_SOC_AU1550
  67	/* UARTS */
  68	{ DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
  69	{ DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
  70	{ DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
  71	{ DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 },
  72
  73	/* EXT DMA */
  74	{ DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
  75	{ DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
  76	{ DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
  77	{ DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },
  78
  79	/* USB DEV */
  80	{ DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 },
  81	{ DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
  82	{ DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
  83	{ DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
  84	{ DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 },
  85	{ DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 },
  86
  87	/* PSC 0 */
  88	{ DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
  89	{ DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 },
  90
  91	/* PSC 1 */
  92	{ DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
  93	{ DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 },
  94
  95	/* PSC 2 */
  96	{ DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
  97	{ DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 },
  98
  99	/* PSC 3 */
 100	{ DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
 101	{ DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 },
 102
 103	{ DSCR_CMD0_PCI_WRITE, 0, 0, 0, 0x00000000, 0, 0 },	/* PCI */
 104	{ DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 },	/* NAND */
 105
 106	/* MAC 0 */
 107	{ DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
 108	{ DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
 109
 110	/* MAC 1 */
 111	{ DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
 112	{ DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
 113
 114#endif /* CONFIG_SOC_AU1550 */
 115
 116#ifdef CONFIG_SOC_AU1200
 117	{ DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
 118	{ DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
 119	{ DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
 120	{ DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x11200000, 0, 0 },
 121
 122	{ DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
 123	{ DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
 124
 125	{ DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 126	{ DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 127	{ DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 128	{ DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 129
 130	{ DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
 131	{ DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
 132	{ DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
 133	{ DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 },
 134
 135	{ DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
 136	{ DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
 137
 138	{ DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
 139	{ DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x11a0001c, 0, 0 },
 140	{ DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 141
 142	{ DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
 143	{ DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x11b0001c, 0, 0 },
 144	{ DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 145
 146	{ DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
 147	{ DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
 148	{ DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
 149	{ DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 150
 151	{ DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
 152
 153#endif /* CONFIG_SOC_AU1200 */
 154
 155	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 156	{ DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 157
 158	/* Provide 16 user definable device types */
 159	{ ~0, 0, 0, 0, 0, 0, 0 },
 160	{ ~0, 0, 0, 0, 0, 0, 0 },
 161	{ ~0, 0, 0, 0, 0, 0, 0 },
 162	{ ~0, 0, 0, 0, 0, 0, 0 },
 163	{ ~0, 0, 0, 0, 0, 0, 0 },
 164	{ ~0, 0, 0, 0, 0, 0, 0 },
 165	{ ~0, 0, 0, 0, 0, 0, 0 },
 166	{ ~0, 0, 0, 0, 0, 0, 0 },
 167	{ ~0, 0, 0, 0, 0, 0, 0 },
 168	{ ~0, 0, 0, 0, 0, 0, 0 },
 169	{ ~0, 0, 0, 0, 0, 0, 0 },
 170	{ ~0, 0, 0, 0, 0, 0, 0 },
 171	{ ~0, 0, 0, 0, 0, 0, 0 },
 172	{ ~0, 0, 0, 0, 0, 0, 0 },
 173	{ ~0, 0, 0, 0, 0, 0, 0 },
 174	{ ~0, 0, 0, 0, 0, 0, 0 },
 175};
 176
 177#define DBDEV_TAB_SIZE	ARRAY_SIZE(dbdev_tab)
 178
 179
 180static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
 181
 182static dbdev_tab_t *find_dbdev_id(u32 id)
 183{
 184	int i;
 185	dbdev_tab_t *p;
 186	for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
 187		p = &dbdev_tab[i];
 188		if (p->dev_id == id)
 189			return p;
 190	}
 191	return NULL;
 192}
 193
 194void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
 195{
 196	return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 197}
 198EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);
 199
 200u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
 201{
 202	u32 ret = 0;
 203	dbdev_tab_t *p;
 204	static u16 new_id = 0x1000;
 205
 206	p = find_dbdev_id(~0);
 207	if (NULL != p) {
 208		memcpy(p, dev, sizeof(dbdev_tab_t));
 209		p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
 210		ret = p->dev_id;
 211		new_id++;
 212#if 0
 213		printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
 214				  p->dev_id, p->dev_flags, p->dev_physaddr);
 215#endif
 216	}
 217
 218	return ret;
 219}
 220EXPORT_SYMBOL(au1xxx_ddma_add_device);
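/*
 * Editor's sketch (not part of the original file): registering a
 * custom device in one of the 16 user-definable ~0 slots.  All field
 * values below are made up; the returned id encodes both the new
 * custom id and the base device id via DSCR_DEV2CUSTOM_ID().
 */
#if 0
	dbdev_tab_t my_dev = {
		.dev_id       = DSCR_CMD0_ALWAYS,	/* base id to derive from */
		.dev_flags    = DEV_FLAGS_OUT,
		.dev_tsize    = 4,
		.dev_devwidth = 32,
		.dev_physaddr = 0x1f000000,		/* hypothetical FIFO address */
	};
	u32 id = au1xxx_ddma_add_device(&my_dev);
	if (!id)
		;	/* no free user-definable (~0) slot left */
#endif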
 221
 222void au1xxx_ddma_del_device(u32 devid)
 223{
 224	dbdev_tab_t *p = find_dbdev_id(devid);
 225
 226	if (p != NULL) {
 227		memset(p, 0, sizeof(dbdev_tab_t));
 228		p->dev_id = ~0;
 229	}
 230}
 231EXPORT_SYMBOL(au1xxx_ddma_del_device);
 232
 233/* Allocate a channel and return a non-zero descriptor if successful. */
 234u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 235       void (*callback)(int, void *), void *callparam)
 236{
 237	unsigned long   flags;
 238	u32		used, chan;
 239	u32		dcp;
 240	int		i;
 241	dbdev_tab_t	*stp, *dtp;
 242	chan_tab_t	*ctp;
 243	au1x_dma_chan_t *cp;
 244
 245	/*
  246	 * We do the initialization on the first channel allocation.
 247	 * We have to wait because of the interrupt handler initialization
 248	 * which can't be done successfully during board set up.
 249	 */
 250	if (!dbdma_initialized)
 251		return 0;
 252
 253	stp = find_dbdev_id(srcid);
 254	if (stp == NULL)
 255		return 0;
 256	dtp = find_dbdev_id(destid);
 257	if (dtp == NULL)
 258		return 0;
 259
 260	used = 0;
 261
 262	/* Check to see if we can get both channels. */
 263	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
 264	if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
 265	     (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
 266		/* Got source */
 267		stp->dev_flags |= DEV_FLAGS_INUSE;
 268		if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
 269		     (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
 270			/* Got destination */
 271			dtp->dev_flags |= DEV_FLAGS_INUSE;
 272		} else {
 273			/* Can't get dest.  Release src. */
 274			stp->dev_flags &= ~DEV_FLAGS_INUSE;
 275			used++;
 276		}
 277	} else
 278		used++;
 279	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
 280
 281	if (used)
 282		return 0;
 283
 284	/* Let's see if we can allocate a channel for it. */
 285	ctp = NULL;
 286	chan = 0;
 287	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
 288	for (i = 0; i < NUM_DBDMA_CHANS; i++)
 289		if (chan_tab_ptr[i] == NULL) {
 290			/*
 291			 * If kmalloc fails, it is caught below same
 292			 * as a channel not available.
 293			 */
 294			ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
 295			chan_tab_ptr[i] = ctp;
 296			break;
 297		}
 298	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
 299
 300	if (ctp != NULL) {
 301		memset(ctp, 0, sizeof(chan_tab_t));
 302		ctp->chan_index = chan = i;
 303		dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
 304		dcp += (0x0100 * chan);
 305		ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
 306		cp = (au1x_dma_chan_t *)dcp;
 307		ctp->chan_src = stp;
 308		ctp->chan_dest = dtp;
 309		ctp->chan_callback = callback;
 310		ctp->chan_callparam = callparam;
 311
 312		/* Initialize channel configuration. */
 313		i = 0;
 314		if (stp->dev_intlevel)
 315			i |= DDMA_CFG_SED;
 316		if (stp->dev_intpolarity)
 317			i |= DDMA_CFG_SP;
 318		if (dtp->dev_intlevel)
 319			i |= DDMA_CFG_DED;
 320		if (dtp->dev_intpolarity)
 321			i |= DDMA_CFG_DP;
 322		if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
 323			(dtp->dev_flags & DEV_FLAGS_SYNC))
 324				i |= DDMA_CFG_SYNC;
 325		cp->ddma_cfg = i;
 326		au_sync();
 327
 328		/*
 329		 * Return a non-zero value that can be used to find the channel
 330		 * information in subsequent operations.
 331		 */
 332		return (u32)(&chan_tab_ptr[chan]);
 333	}
 334
 335	/* Release devices */
 336	stp->dev_flags &= ~DEV_FLAGS_INUSE;
 337	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
 338
 339	return 0;
 340}
 341EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
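/*
 * Editor's sketch (not part of the original file): typical channel
 * allocation for a memory-to-FIFO transfer.  'my_tx_id' and
 * 'my_driver_data' are hypothetical; the callback signature matches
 * the void (*)(int, void *) parameter above and is invoked from the
 * DBDMA interrupt handler.
 */
#if 0
static void my_dma_done(int irq, void *arg)
{
	/* a descriptor queued with DSCR_CMD0_IE has completed */
}

static int my_setup(void)
{
	u32 chanid = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,	/* src: memory */
					     my_tx_id,		/* hypothetical dest id */
					     my_dma_done, my_driver_data);
	if (!chanid)
		return -EBUSY;	/* devices in use, no free channel, or not initialized */
	return 0;
}
#endif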
 342
 343/*
 344 * Set the device width if source or destination is a FIFO.
 345 * Should be 8, 16, or 32 bits.
 346 */
 347u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
 348{
 349	u32		rv;
 350	chan_tab_t	*ctp;
 351	dbdev_tab_t	*stp, *dtp;
 352
 353	ctp = *((chan_tab_t **)chanid);
 354	stp = ctp->chan_src;
 355	dtp = ctp->chan_dest;
 356	rv = 0;
 357
 358	if (stp->dev_flags & DEV_FLAGS_IN) {	/* Source in fifo */
 359		rv = stp->dev_devwidth;
 360		stp->dev_devwidth = bits;
 361	}
 362	if (dtp->dev_flags & DEV_FLAGS_OUT) {	/* Destination out fifo */
 363		rv = dtp->dev_devwidth;
 364		dtp->dev_devwidth = bits;
 365	}
 366
 367	return rv;
 368}
 369EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
 370
 371/* Allocate a descriptor ring, initializing as much as possible. */
 372u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 373{
 374	int			i;
 375	u32			desc_base, srcid, destid;
 376	u32			cmd0, cmd1, src1, dest1;
 377	u32			src0, dest0;
 378	chan_tab_t		*ctp;
 379	dbdev_tab_t		*stp, *dtp;
 380	au1x_ddma_desc_t	*dp;
 381
 382	/*
 383	 * I guess we could check this to be within the
 384	 * range of the table......
 385	 */
 386	ctp = *((chan_tab_t **)chanid);
 387	stp = ctp->chan_src;
 388	dtp = ctp->chan_dest;
 389
 390	/*
 391	 * The descriptors must be 32-byte aligned.  There is a
 392	 * possibility the allocation will give us such an address,
 393	 * and if we try that first we are likely to not waste larger
 394	 * slabs of memory.
 395	 */
 396	desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
 397				 GFP_KERNEL|GFP_DMA);
 398	if (desc_base == 0)
 399		return 0;
 400
 401	if (desc_base & 0x1f) {
 402		/*
 403		 * Lost....do it again, allocate extra, and round
 404		 * the address base.
 405		 */
 406		kfree((const void *)desc_base);
 407		i = entries * sizeof(au1x_ddma_desc_t);
 408		i += (sizeof(au1x_ddma_desc_t) - 1);
 409		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
 410		if (desc_base == 0)
 411			return 0;
 412
 413		ctp->cdb_membase = desc_base;
 414		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
 415	} else
 416		ctp->cdb_membase = desc_base;
 417
 418	dp = (au1x_ddma_desc_t *)desc_base;
 419
 420	/* Keep track of the base descriptor. */
 421	ctp->chan_desc_base = dp;
 422
 423	/* Initialize the rings with as much information as we know. */
 424	srcid = stp->dev_id;
 425	destid = dtp->dev_id;
 426
 427	cmd0 = cmd1 = src1 = dest1 = 0;
 428	src0 = dest0 = 0;
 429
 430	cmd0 |= DSCR_CMD0_SID(srcid);
 431	cmd0 |= DSCR_CMD0_DID(destid);
 432	cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
 433	cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);
 434
 435	/* Is it mem to mem transfer? */
 436	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
 437	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
 438	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
 439	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
 440		cmd0 |= DSCR_CMD0_MEM;
 441
 442	switch (stp->dev_devwidth) {
 443	case 8:
 444		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
 445		break;
 446	case 16:
 447		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
 448		break;
 449	case 32:
 450	default:
 451		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
 452		break;
 453	}
 454
 455	switch (dtp->dev_devwidth) {
 456	case 8:
 457		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
 458		break;
 459	case 16:
 460		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
 461		break;
 462	case 32:
 463	default:
 464		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
 465		break;
 466	}
 467
 468	/*
 469	 * If the device is marked as an in/out FIFO, ensure it is
 470	 * set non-coherent.
 471	 */
 472	if (stp->dev_flags & DEV_FLAGS_IN)
 473		cmd0 |= DSCR_CMD0_SN;		/* Source in FIFO */
 474	if (dtp->dev_flags & DEV_FLAGS_OUT)
 475		cmd0 |= DSCR_CMD0_DN;		/* Destination out FIFO */
 476
 477	/*
 478	 * Set up source1.  For now, assume no stride and increment.
 479	 * A channel attribute update can change this later.
 480	 */
 481	switch (stp->dev_tsize) {
 482	case 1:
 483		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
 484		break;
 485	case 2:
 486		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
 487		break;
 488	case 4:
 489		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
 490		break;
 491	case 8:
 492	default:
 493		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
 494		break;
 495	}
 496
 497	/* If source input is FIFO, set static address.	*/
 498	if (stp->dev_flags & DEV_FLAGS_IN) {
 499		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
 500			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
 501		else
 502			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
 503	}
 504
 505	if (stp->dev_physaddr)
 506		src0 = stp->dev_physaddr;
 507
 508	/*
 509	 * Set up dest1.  For now, assume no stride and increment.
 510	 * A channel attribute update can change this later.
 511	 */
 512	switch (dtp->dev_tsize) {
 513	case 1:
 514		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
 515		break;
 516	case 2:
 517		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
 518		break;
 519	case 4:
 520		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
 521		break;
 522	case 8:
 523	default:
 524		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
 525		break;
 526	}
 527
 528	/* If destination output is FIFO, set static address. */
 529	if (dtp->dev_flags & DEV_FLAGS_OUT) {
 530		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
 531			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
 532		else
 533			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
 534	}
 535
 536	if (dtp->dev_physaddr)
 537		dest0 = dtp->dev_physaddr;
 538
 539#if 0
 540		printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
 541				  "source1:%x dest0:%x dest1:%x\n",
 542				  dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
 543				  src1, dest0, dest1);
 544#endif
 545	for (i = 0; i < entries; i++) {
 546		dp->dscr_cmd0 = cmd0;
 547		dp->dscr_cmd1 = cmd1;
 548		dp->dscr_source0 = src0;
 549		dp->dscr_source1 = src1;
 550		dp->dscr_dest0 = dest0;
 551		dp->dscr_dest1 = dest1;
 552		dp->dscr_stat = 0;
 553		dp->sw_context = 0;
 554		dp->sw_status = 0;
 555		dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
 556		dp++;
 557	}
 558
  559	/* Make last descriptor point to the first. */
 560	dp--;
 561	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
 562	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
 563
 564	return (u32)ctp->chan_desc_base;
 565}
 566EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
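/*
 * Editor's sketch (not part of the original file): sizing the ring for
 * a channel allocated as above.  NUM_DESCS is a made-up driver
 * constant; the devwidth override is only needed when the FIFO width
 * differs from the dbdev_tab default.
 */
#if 0
	au1xxx_dbdma_set_devwidth(chanid, 16);		/* 16-bit FIFO accesses */
	if (!au1xxx_dbdma_ring_alloc(chanid, NUM_DESCS))
		return -ENOMEM;
#endif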
 567
 568/*
 569 * Put a source buffer into the DMA ring.
 570 * This updates the source pointer and byte count.  Normally used
 571 * for memory to fifo transfers.
 572 */
 573u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 574{
 575	chan_tab_t		*ctp;
 576	au1x_ddma_desc_t	*dp;
 577
 578	/*
 579	 * I guess we could check this to be within the
 580	 * range of the table......
 581	 */
 582	ctp = *(chan_tab_t **)chanid;
 583
 584	/*
 585	 * We should have multiple callers for a particular channel,
 586	 * an interrupt doesn't affect this pointer nor the descriptor,
 587	 * so no locking should be needed.
 588	 */
 589	dp = ctp->put_ptr;
 590
 591	/*
 592	 * If the descriptor is valid, we are way ahead of the DMA
 593	 * engine, so just return an error condition.
 594	 */
 595	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 596		return 0;
 597
 598	/* Load up buffer address and byte count. */
 599	dp->dscr_source0 = buf & ~0UL;
 600	dp->dscr_cmd1 = nbytes;
 601	/* Check flags */
 602	if (flags & DDMA_FLAGS_IE)
 603		dp->dscr_cmd0 |= DSCR_CMD0_IE;
 604	if (flags & DDMA_FLAGS_NOIE)
 605		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 606
 607	/*
  608	 * There is an erratum on the Au1200/Au1550 parts that could result
 609	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
 610	 * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
 611	 * these parts. If it is fixed in the future, these dma_cache_inv will
 612	 * just be nothing more than empty macros. See io.h.
 613	 */
 614	dma_cache_wback_inv((unsigned long)buf, nbytes);
 615	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 616	au_sync();
 617	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
 618	ctp->chan_ptr->ddma_dbell = 0;
 619
 620	/* Get next descriptor pointer.	*/
 621	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 622
 623	/* Return something non-zero. */
 624	return nbytes;
 625}
 626EXPORT_SYMBOL(au1xxx_dbdma_put_source);
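/*
 * Editor's sketch (not part of the original file): a TX producer loop.
 * put_source() returns 0 once it reaches a descriptor the engine still
 * owns (valid bit set), so queueing can simply continue until the ring
 * fills.  'buf_phys', 'len' and the helpers are hypothetical.
 */
#if 0
	while (have_tx_data() &&
	       au1xxx_dbdma_put_source(chanid, buf_phys, len, DDMA_FLAGS_IE))
		next_tx_buffer(&buf_phys, &len);	/* hypothetical helpers */
	au1xxx_dbdma_start(chanid);
#endif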
 627
 628/* Put a destination buffer into the DMA ring.
 629 * This updates the destination pointer and byte count.  Normally used
 630 * to place an empty buffer into the ring for fifo to memory transfers.
 631 */
 632u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 633{
 634	chan_tab_t		*ctp;
 635	au1x_ddma_desc_t	*dp;
 636
 637	/* I guess we could check this to be within the
 638	 * range of the table......
 639	 */
 640	ctp = *((chan_tab_t **)chanid);
 641
 642	/* We should have multiple callers for a particular channel,
 643	 * an interrupt doesn't affect this pointer nor the descriptor,
 644	 * so no locking should be needed.
 645	 */
 646	dp = ctp->put_ptr;
 647
 648	/* If the descriptor is valid, we are way ahead of the DMA
 649	 * engine, so just return an error condition.
 650	 */
 651	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 652		return 0;
 653
 654	/* Load up buffer address and byte count */
 655
 656	/* Check flags  */
 657	if (flags & DDMA_FLAGS_IE)
 658		dp->dscr_cmd0 |= DSCR_CMD0_IE;
 659	if (flags & DDMA_FLAGS_NOIE)
 660		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 661
 662	dp->dscr_dest0 = buf & ~0UL;
 663	dp->dscr_cmd1 = nbytes;
 664#if 0
 665	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
 666			  dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
 667			  dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
 668#endif
 669	/*
  670	 * There is an erratum on the Au1200/Au1550 parts that could result in
 671	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
 672	 * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
 673	 * parts. If it is fixed in the future, these dma_cache_inv will just
 674	 * be nothing more than empty macros. See io.h.
 675	 */
 676	dma_cache_inv((unsigned long)buf, nbytes);
 677	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 678	au_sync();
 679	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
 680	ctp->chan_ptr->ddma_dbell = 0;
 681
 682	/* Get next descriptor pointer.	*/
 683	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 684
 685	/* Return something non-zero. */
 686	return nbytes;
 687}
 688EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
 689
 690/*
 691 * Get a destination buffer into the DMA ring.
 692 * Normally used to get a full buffer from the ring during fifo
 693 * to memory transfers.  This does not set the valid bit, you will
 694 * have to put another destination buffer to keep the DMA going.
 695 */
 696u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
 697{
 698	chan_tab_t		*ctp;
 699	au1x_ddma_desc_t	*dp;
 700	u32			rv;
 701
 702	/*
 703	 * I guess we could check this to be within the
 704	 * range of the table......
 705	 */
 706	ctp = *((chan_tab_t **)chanid);
 707
 708	/*
 709	 * We should have multiple callers for a particular channel,
 710	 * an interrupt doesn't affect this pointer nor the descriptor,
 711	 * so no locking should be needed.
 712	 */
 713	dp = ctp->get_ptr;
 714
 715	/*
 716	 * If the descriptor is valid, we are way ahead of the DMA
 717	 * engine, so just return an error condition.
 718	 */
 719	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 720		return 0;
 721
 722	/* Return buffer address and byte count. */
 723	*buf = (void *)(phys_to_virt(dp->dscr_dest0));
 724	*nbytes = dp->dscr_cmd1;
 725	rv = dp->dscr_stat;
 726
 727	/* Get next descriptor pointer.	*/
 728	ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 729
 730	/* Return something non-zero. */
 731	return rv;
 732}
 733EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
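/*
 * Editor's sketch (not part of the original file): draining an RX
 * ring.  get_dest() does not re-set the valid bit, so each harvested
 * buffer is replaced with a fresh empty one to keep the engine fed.
 * 'empty_buf_phys' and EMPTY_BUF_SIZE are hypothetical.
 */
#if 0
	void *buf;
	int len;

	while (au1xxx_dbdma_get_dest(chanid, &buf, &len)) {
		/* hand buf/len up the stack ... */
		au1xxx_dbdma_put_dest(chanid, empty_buf_phys,
				      EMPTY_BUF_SIZE, DDMA_FLAGS_IE);
	}
#endif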
 734
 735void au1xxx_dbdma_stop(u32 chanid)
 736{
 737	chan_tab_t	*ctp;
 738	au1x_dma_chan_t *cp;
 739	int halt_timeout = 0;
 740
 741	ctp = *((chan_tab_t **)chanid);
 742
 743	cp = ctp->chan_ptr;
 744	cp->ddma_cfg &= ~DDMA_CFG_EN;	/* Disable channel */
 745	au_sync();
 746	while (!(cp->ddma_stat & DDMA_STAT_H)) {
 747		udelay(1);
 748		halt_timeout++;
 749		if (halt_timeout > 100) {
 750			printk(KERN_WARNING "warning: DMA channel won't halt\n");
 751			break;
 752		}
 753	}
 754	/* clear current desc valid and doorbell */
 755	cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
 756	au_sync();
 757}
 758EXPORT_SYMBOL(au1xxx_dbdma_stop);
 759
 760/*
 761 * Start using the current descriptor pointer.  If the DBDMA encounters
 762 * a non-valid descriptor, it will stop.  In this case, we can just
 763 * continue by adding a buffer to the list and starting again.
 764 */
 765void au1xxx_dbdma_start(u32 chanid)
 766{
 767	chan_tab_t	*ctp;
 768	au1x_dma_chan_t *cp;
 769
 770	ctp = *((chan_tab_t **)chanid);
 771	cp = ctp->chan_ptr;
 772	cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
 773	cp->ddma_cfg |= DDMA_CFG_EN;	/* Enable channel */
 774	au_sync();
 775	cp->ddma_dbell = 0;
 776	au_sync();
 777}
 778EXPORT_SYMBOL(au1xxx_dbdma_start);
 779
 780void au1xxx_dbdma_reset(u32 chanid)
 781{
 782	chan_tab_t		*ctp;
 783	au1x_ddma_desc_t	*dp;
 784
 785	au1xxx_dbdma_stop(chanid);
 786
 787	ctp = *((chan_tab_t **)chanid);
 788	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
 789
 790	/* Run through the descriptors and reset the valid indicator. */
 791	dp = ctp->chan_desc_base;
 792
 793	do {
 794		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
 795		/*
 796		 * Reset our software status -- this is used to determine
 797		 * if a descriptor is in use by upper level software. Since
 798		 * posting can reset 'V' bit.
 799		 */
 800		dp->sw_status = 0;
 801		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 802	} while (dp != ctp->chan_desc_base);
 803}
 804EXPORT_SYMBOL(au1xxx_dbdma_reset);
 805
 806u32 au1xxx_get_dma_residue(u32 chanid)
 807{
 808	chan_tab_t	*ctp;
 809	au1x_dma_chan_t *cp;
 810	u32		rv;
 811
 812	ctp = *((chan_tab_t **)chanid);
 813	cp = ctp->chan_ptr;
 814
 815	/* This is only valid if the channel is stopped. */
 816	rv = cp->ddma_bytecnt;
 817	au_sync();
 818
 819	return rv;
 820}
 821EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);
 822
 823void au1xxx_dbdma_chan_free(u32 chanid)
 824{
 825	chan_tab_t	*ctp;
 826	dbdev_tab_t	*stp, *dtp;
 827
 828	ctp = *((chan_tab_t **)chanid);
 829	stp = ctp->chan_src;
 830	dtp = ctp->chan_dest;
 831
 832	au1xxx_dbdma_stop(chanid);
 833
 834	kfree((void *)ctp->cdb_membase);
 835
 836	stp->dev_flags &= ~DEV_FLAGS_INUSE;
 837	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
 838	chan_tab_ptr[ctp->chan_index] = NULL;
 839
 840	kfree(ctp);
 841}
 842EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
 843
 844static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
 845{
 846	u32 intstat;
 847	u32 chan_index;
 848	chan_tab_t		*ctp;
 849	au1x_ddma_desc_t	*dp;
 850	au1x_dma_chan_t *cp;
 851
 852	intstat = dbdma_gptr->ddma_intstat;
 853	au_sync();
 854	chan_index = __ffs(intstat);
 855
 856	ctp = chan_tab_ptr[chan_index];
 857	cp = ctp->chan_ptr;
 858	dp = ctp->cur_ptr;
 859
 860	/* Reset interrupt. */
 861	cp->ddma_irq = 0;
 862	au_sync();
 863
 864	if (ctp->chan_callback)
 865		ctp->chan_callback(irq, ctp->chan_callparam);
 866
 867	ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 868	return IRQ_RETVAL(1);
 869}
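/*
 * Editor's note (not in the original source): __ffs() selects the
 * lowest-numbered pending channel, e.g. intstat == 0x0014 (channels 2
 * and 4 pending) dispatches channel 2 first; the still-asserted status
 * for channel 4 presumably re-triggers the handler for the next one.
 */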
 870
 871void au1xxx_dbdma_dump(u32 chanid)
 872{
 873	chan_tab_t	 *ctp;
 874	au1x_ddma_desc_t *dp;
 875	dbdev_tab_t	 *stp, *dtp;
 876	au1x_dma_chan_t  *cp;
 877	u32 i		 = 0;
 878
 879	ctp = *((chan_tab_t **)chanid);
 880	stp = ctp->chan_src;
 881	dtp = ctp->chan_dest;
 882	cp = ctp->chan_ptr;
 883
 884	printk(KERN_DEBUG "Chan %x, stp %x (dev %d)  dtp %x (dev %d)\n",
 885			  (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
 886			  dtp - dbdev_tab);
 887	printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
 888			  (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
 889			  (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));
 890
 891	printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
 892	printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
 893			  cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
 894	printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
 895			  cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
 896			  cp->ddma_bytecnt);
 897
 898	/* Run through the descriptors */
 899	dp = ctp->chan_desc_base;
 900
 901	do {
 902		printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
 903				  i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
 904		printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
 905				  dp->dscr_source0, dp->dscr_source1,
 906				  dp->dscr_dest0, dp->dscr_dest1);
 907		printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
 908				  dp->dscr_stat, dp->dscr_nxtptr);
 909		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 910	} while (dp != ctp->chan_desc_base);
 911}
 912
 913/* Put a descriptor into the DMA ring.
 914 * This updates the source/destination pointers and byte count.
 915 */
 916u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
 917{
 918	chan_tab_t *ctp;
 919	au1x_ddma_desc_t *dp;
 920	u32 nbytes = 0;
 921
 922	/*
 923	 * I guess we could check this to be within the
 924	 * range of the table......
 925	 */
 926	ctp = *((chan_tab_t **)chanid);
 927
 928	/*
 929	 * We should have multiple callers for a particular channel,
 930	 * an interrupt doesn't affect this pointer nor the descriptor,
 931	 * so no locking should be needed.
 932	 */
 933	dp = ctp->put_ptr;
 934
 935	/*
 936	 * If the descriptor is valid, we are way ahead of the DMA
 937	 * engine, so just return an error condition.
 938	 */
 939	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 940		return 0;
 941
 942	/* Load up buffer addresses and byte count. */
 943	dp->dscr_dest0 = dscr->dscr_dest0;
 944	dp->dscr_source0 = dscr->dscr_source0;
 945	dp->dscr_dest1 = dscr->dscr_dest1;
 946	dp->dscr_source1 = dscr->dscr_source1;
 947	dp->dscr_cmd1 = dscr->dscr_cmd1;
 948	nbytes = dscr->dscr_cmd1;
  949	/* Allow the caller to specify if an interrupt is generated */
 950	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 951	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
 952	ctp->chan_ptr->ddma_dbell = 0;
 953
 954	/* Get next descriptor pointer.	*/
 955	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 956
 957	/* Return something non-zero. */
 958	return nbytes;
 959}
 960
 961
 962static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];
 963
 964static int alchemy_dbdma_suspend(void)
 965{
 966	int i;
 967	void __iomem *addr;
 968
 969	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
 970	alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
 971	alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
 972	alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
 973	alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);
 974
 975	/* save channel configurations */
 976	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
 977	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
 978		alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
 979		alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
 980		alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
 981		alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
 982		alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
 983		alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);
 984
 985		/* halt channel */
 986		__raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
 987		wmb();
 988		while (!(__raw_readl(addr + 0x14) & 1))
 989			wmb();
 990
 991		addr += 0x100;	/* next channel base */
 992	}
 993	/* disable channel interrupts */
 994	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
 995	__raw_writel(0, addr + 0x0c);
 996	wmb();
 997
 998	return 0;
 999}
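/*
 * Editor's note (not in the original source): judging from the
 * driver's own register accesses, the six words saved per channel
 * appear to be ddma_cfg (+0x00), ddma_desptr (+0x04), ddma_statptr
 * (+0x08), ddma_dbell (+0x0c), ddma_irq (+0x10) and ddma_stat (+0x14);
 * clearing bit 0 of +0x00 matches DDMA_CFG_EN, and the halt poll on
 * bit 0 of +0x14 matches the DDMA_STAT_H test in au1xxx_dbdma_stop().
 */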
1000
1001static void alchemy_dbdma_resume(void)
1002{
1003	int i;
1004	void __iomem *addr;
1005
1006	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
1007	__raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
1008	__raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
1009	__raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
1010	__raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);
1011
1012	/* restore channel configurations */
1013	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
1014	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
1015		__raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
1016		__raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
1017		__raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
1018		__raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
1019		__raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
1020		__raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
1021		wmb();
1022		addr += 0x100;	/* next channel base */
1023	}
1024}
1025
1026static struct syscore_ops alchemy_dbdma_syscore_ops = {
1027	.suspend	= alchemy_dbdma_suspend,
1028	.resume		= alchemy_dbdma_resume,
1029};
1030
1031static int __init au1xxx_dbdma_init(void)
1032{
1033	int irq_nr, ret;
1034
1035	dbdma_gptr->ddma_config = 0;
1036	dbdma_gptr->ddma_throttle = 0;
1037	dbdma_gptr->ddma_inten = 0xffff;
1038	au_sync();
1039
1040	switch (alchemy_get_cputype()) {
1041	case ALCHEMY_CPU_AU1550:
1042		irq_nr = AU1550_DDMA_INT;
1043		break;
1044	case ALCHEMY_CPU_AU1200:
1045		irq_nr = AU1200_DDMA_INT;
1046		break;
1047	default:
1048		return -ENODEV;
1049	}
1050
1051	ret = request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
1052			"Au1xxx dbdma", (void *)dbdma_gptr);
1053	if (ret)
1054		printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
1055	else {
1056		dbdma_initialized = 1;
1057		printk(KERN_INFO "Alchemy DBDMA initialized\n");
1058		register_syscore_ops(&alchemy_dbdma_syscore_ops);
1059	}
1060
1061	return ret;
1062}
1063subsys_initcall(au1xxx_dbdma_init);
1064
1065#endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */
v6.8
   1/*
   2 *
   3 * BRIEF MODULE DESCRIPTION
   4 *      The Descriptor Based DMA channel manager that first appeared
   5 *	on the Au1550.  I started with dma.c, but I think all that is
   6 *	left is this initial comment :-)
   7 *
   8 * Copyright 2004 Embedded Edge, LLC
   9 *	dan@embeddededge.com
  10 *
  11 *  This program is free software; you can redistribute  it and/or modify it
  12 *  under  the terms of  the GNU General  Public License as published by the
  13 *  Free Software Foundation;  either version 2 of the  License, or (at your
  14 *  option) any later version.
  15 *
  16 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
  17 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
  18 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  19 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
  20 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  21 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
  22 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  23 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
  24 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  25 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  26 *
  27 *  You should have received a copy of the  GNU General Public License along
  28 *  with this program; if not, write  to the Free Software Foundation, Inc.,
  29 *  675 Mass Ave, Cambridge, MA 02139, USA.
  30 *
  31 */
  32
  33#include <linux/dma-map-ops.h> /* for dma_default_coherent */
  34#include <linux/init.h>
  35#include <linux/kernel.h>
  36#include <linux/slab.h>
  37#include <linux/spinlock.h>
  38#include <linux/interrupt.h>
  39#include <linux/export.h>
  40#include <linux/syscore_ops.h>
  41#include <asm/mach-au1x00/au1000.h>
  42#include <asm/mach-au1x00/au1xxx_dbdma.h>
  43
  44/*
  45 * The Descriptor Based DMA supports up to 16 channels.
  46 *
  47 * There are 32 devices defined. We keep an internal structure
  48 * of devices using these channels, along with additional
  49 * information.
  50 *
  51 * We allocate the descriptors and allow access to them through various
  52 * functions.  The drivers allocate the data buffers and assign them
  53 * to the descriptors.
  54 */
  55static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
  56
  57/* I couldn't find a macro that did this... */
  58#define ALIGN_ADDR(x, a)	((((u32)(x)) + (a-1)) & ~(a-1))
  59
  60static dbdma_global_t *dbdma_gptr =
  61			(dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
  62static int dbdma_initialized;
  63
  64static dbdev_tab_t *dbdev_tab;
  65
  66static dbdev_tab_t au1550_dbdev_tab[] __initdata = {
  67	/* UARTS */
  68	{ AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
  69	{ AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
  70	{ AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
  71	{ AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN,  0, 8, 0x11400000, 0, 0 },
  72
  73	/* EXT DMA */
  74	{ AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
  75	{ AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
  76	{ AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
  77	{ AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },
  78
  79	/* USB DEV */
  80	{ AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN,  4, 8, 0x10200000, 0, 0 },
  81	{ AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
  82	{ AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
  83	{ AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
  84	{ AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN,  4, 8, 0x10200010, 0, 0 },
  85	{ AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN,  4, 8, 0x10200014, 0, 0 },
  86
  87	/* PSCs */
  88	{ AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
  89	{ AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN,  0, 0, 0x11a0001c, 0, 0 },
  90	{ AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
  91	{ AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN,  0, 0, 0x11b0001c, 0, 0 },
  92	{ AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
  93	{ AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN,  0, 0, 0x10a0001c, 0, 0 },
  94	{ AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
  95	{ AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN,  0, 0, 0x10b0001c, 0, 0 },
  96
  97	{ AU1550_DSCR_CMD0_PCI_WRITE,  0, 0, 0, 0x00000000, 0, 0 },  /* PCI */
  98	{ AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */
  99
 100	/* MAC 0 */
 101	{ AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
 102	{ AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
 103
 104	/* MAC 1 */
 105	{ AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
 106	{ AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
 107
 108	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 109	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 110};
 111
 112static dbdev_tab_t au1200_dbdev_tab[] __initdata = {
 113	{ AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
 114	{ AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
 115	{ AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
 116	{ AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN,  0, 8, 0x11200000, 0, 0 },
 117
 118	{ AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
 119	{ AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
 120
 121	{ AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 122	{ AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 123	{ AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 124	{ AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 125
 126	{ AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
 127	{ AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN,  4, 8, 0x10600004, 0, 0 },
 128	{ AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
 129	{ AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN,  4, 8, 0x10680004, 0, 0 },
 130
 131	{ AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
 132	{ AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
 133
 134	{ AU1200_DSCR_CMD0_PSC0_TX,   DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
 135	{ AU1200_DSCR_CMD0_PSC0_RX,   DEV_FLAGS_IN,  0, 16, 0x11a0001c, 0, 0 },
 136	{ AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 137	{ AU1200_DSCR_CMD0_PSC1_TX,   DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
 138	{ AU1200_DSCR_CMD0_PSC1_RX,   DEV_FLAGS_IN,  0, 16, 0x11b0001c, 0, 0 },
 139	{ AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 140
 141	{ AU1200_DSCR_CMD0_CIM_RXA,  DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
 142	{ AU1200_DSCR_CMD0_CIM_RXB,  DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
 143	{ AU1200_DSCR_CMD0_CIM_RXC,  DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
 144	{ AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 145
 146	{ AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
 147
 148	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 149	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 150};
 151
 152static dbdev_tab_t au1300_dbdev_tab[] __initdata = {
 153	{ AU1300_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8,  0x10100004, 0, 0 },
 154	{ AU1300_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8,  0x10100000, 0, 0 },
 155	{ AU1300_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8,  0x10101004, 0, 0 },
 156	{ AU1300_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN,  0, 8,  0x10101000, 0, 0 },
 157	{ AU1300_DSCR_CMD0_UART2_TX, DEV_FLAGS_OUT, 0, 8,  0x10102004, 0, 0 },
 158	{ AU1300_DSCR_CMD0_UART2_RX, DEV_FLAGS_IN,  0, 8,  0x10102000, 0, 0 },
 159	{ AU1300_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8,  0x10103004, 0, 0 },
 160	{ AU1300_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN,  0, 8,  0x10103000, 0, 0 },
 161
 162	{ AU1300_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8,  0x10600000, 0, 0 },
 163	{ AU1300_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN,  4, 8,  0x10600004, 0, 0 },
 164	{ AU1300_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 8, 8,  0x10601000, 0, 0 },
 165	{ AU1300_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN,  8, 8,  0x10601004, 0, 0 },
 166
 167	{ AU1300_DSCR_CMD0_AES_RX, DEV_FLAGS_IN ,   4, 32, 0x10300008, 0, 0 },
 168	{ AU1300_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT,   4, 32, 0x10300004, 0, 0 },
 169
 170	{ AU1300_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0001c, 0, 0 },
 171	{ AU1300_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN,   0, 16, 0x10a0001c, 0, 0 },
 172	{ AU1300_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0101c, 0, 0 },
 173	{ AU1300_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN,   0, 16, 0x10a0101c, 0, 0 },
 174	{ AU1300_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0201c, 0, 0 },
 175	{ AU1300_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN,   0, 16, 0x10a0201c, 0, 0 },
 176	{ AU1300_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0301c, 0, 0 },
 177	{ AU1300_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN,   0, 16, 0x10a0301c, 0, 0 },
 178
 179	{ AU1300_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE,   0, 0,  0x00000000, 0, 0 },
 180	{ AU1300_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
 181
 182	{ AU1300_DSCR_CMD0_SDMS_TX2, DEV_FLAGS_OUT, 4, 8,  0x10602000, 0, 0 },
 183	{ AU1300_DSCR_CMD0_SDMS_RX2, DEV_FLAGS_IN,  4, 8,  0x10602004, 0, 0 },
 184
 185	{ AU1300_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 186
 187	{ AU1300_DSCR_CMD0_UDMA, DEV_FLAGS_ANYUSE,  0, 32, 0x14001810, 0, 0 },
 188
 189	{ AU1300_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
 190	{ AU1300_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
 191
 192	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 193	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 194};
 195
 196/* 32 predefined plus 32 custom */
 197#define DBDEV_TAB_SIZE		64
 198
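/*
 * Editor's note (not in the original source): unlike the single
 * #ifdef'ed array in v3.1 above, v6.8 keeps one __initdata table per
 * SoC and resolves 'dbdev_tab' at runtime -- presumably by allocating
 * the 64-entry table during init (not shown in this excerpt), copying
 * in the matching per-SoC entries and leaving the remainder as ~0
 * custom slots for au1xxx_ddma_add_device().
 */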
 199static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
 200
 201static dbdev_tab_t *find_dbdev_id(u32 id)
 202{
 203	int i;
 204	dbdev_tab_t *p;
 205	for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
 206		p = &dbdev_tab[i];
 207		if (p->dev_id == id)
 208			return p;
 209	}
 210	return NULL;
 211}
 212
 213void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
 214{
 215	return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 216}
 217EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);
 218
 219u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
 220{
 221	u32 ret = 0;
 222	dbdev_tab_t *p;
 223	static u16 new_id = 0x1000;
 224
 225	p = find_dbdev_id(~0);
 226	if (NULL != p) {
 227		memcpy(p, dev, sizeof(dbdev_tab_t));
 228		p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
 229		ret = p->dev_id;
 230		new_id++;
 231#if 0
 232		printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
 233				  p->dev_id, p->dev_flags, p->dev_physaddr);
 234#endif
 235	}
 236
 237	return ret;
 238}
 239EXPORT_SYMBOL(au1xxx_ddma_add_device);
 240
 241void au1xxx_ddma_del_device(u32 devid)
 242{
 243	dbdev_tab_t *p = find_dbdev_id(devid);
 244
 245	if (p != NULL) {
 246		memset(p, 0, sizeof(dbdev_tab_t));
 247		p->dev_id = ~0;
 248	}
 249}
 250EXPORT_SYMBOL(au1xxx_ddma_del_device);
 251
 252/* Allocate a channel and return a non-zero descriptor if successful. */
 253u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 254       void (*callback)(int, void *), void *callparam)
 255{
 256	unsigned long	flags;
 257	u32		used, chan;
 258	u32		dcp;
 259	int		i;
 260	dbdev_tab_t	*stp, *dtp;
 261	chan_tab_t	*ctp;
 262	au1x_dma_chan_t *cp;
 263
 264	/*
 265	 * We do the initialization on the first channel allocation.
 266	 * We have to wait because of the interrupt handler initialization
 267	 * which can't be done successfully during board set up.
 268	 */
 269	if (!dbdma_initialized)
 270		return 0;
 271
 272	stp = find_dbdev_id(srcid);
 273	if (stp == NULL)
 274		return 0;
 275	dtp = find_dbdev_id(destid);
 276	if (dtp == NULL)
 277		return 0;
 278
 279	used = 0;
 280
 281	/* Check to see if we can get both channels. */
 282	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
 283	if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
 284	     (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
 285		/* Got source */
 286		stp->dev_flags |= DEV_FLAGS_INUSE;
 287		if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
 288		     (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
 289			/* Got destination */
 290			dtp->dev_flags |= DEV_FLAGS_INUSE;
 291		} else {
 292			/* Can't get dest.  Release src. */
 293			stp->dev_flags &= ~DEV_FLAGS_INUSE;
 294			used++;
 295		}
 296	} else
 297		used++;
 298	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
 299
 300	if (used)
 301		return 0;
 302
 303	/* Let's see if we can allocate a channel for it. */
 304	ctp = NULL;
 305	chan = 0;
 306	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
 307	for (i = 0; i < NUM_DBDMA_CHANS; i++)
 308		if (chan_tab_ptr[i] == NULL) {
 309			/*
 310			 * If kmalloc fails, it is caught below same
 311			 * as a channel not available.
 312			 */
 313			ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
 314			chan_tab_ptr[i] = ctp;
 315			break;
 316		}
 317	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
 318
 319	if (ctp != NULL) {
 320		memset(ctp, 0, sizeof(chan_tab_t));
 321		ctp->chan_index = chan = i;
 322		dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
 323		dcp += (0x0100 * chan);
 324		ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
 325		cp = (au1x_dma_chan_t *)dcp;
 326		ctp->chan_src = stp;
 327		ctp->chan_dest = dtp;
 328		ctp->chan_callback = callback;
 329		ctp->chan_callparam = callparam;
 330
 331		/* Initialize channel configuration. */
 332		i = 0;
 333		if (stp->dev_intlevel)
 334			i |= DDMA_CFG_SED;
 335		if (stp->dev_intpolarity)
 336			i |= DDMA_CFG_SP;
 337		if (dtp->dev_intlevel)
 338			i |= DDMA_CFG_DED;
 339		if (dtp->dev_intpolarity)
 340			i |= DDMA_CFG_DP;
 341		if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
 342			(dtp->dev_flags & DEV_FLAGS_SYNC))
 343				i |= DDMA_CFG_SYNC;
 344		cp->ddma_cfg = i;
 345		wmb(); /* drain writebuffer */
 346
 347		/*
 348		 * Return a non-zero value that can be used to find the channel
 349		 * information in subsequent operations.
 350		 */
 351		return (u32)(&chan_tab_ptr[chan]);
 352	}
 353
 354	/* Release devices */
 355	stp->dev_flags &= ~DEV_FLAGS_INUSE;
 356	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
 357
 358	return 0;
 359}
 360EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
 361
 362/*
 363 * Set the device width if source or destination is a FIFO.
 364 * Should be 8, 16, or 32 bits.
 365 */
 366u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
 367{
 368	u32		rv;
 369	chan_tab_t	*ctp;
 370	dbdev_tab_t	*stp, *dtp;
 371
 372	ctp = *((chan_tab_t **)chanid);
 373	stp = ctp->chan_src;
 374	dtp = ctp->chan_dest;
 375	rv = 0;
 376
 377	if (stp->dev_flags & DEV_FLAGS_IN) {	/* Source in fifo */
 378		rv = stp->dev_devwidth;
 379		stp->dev_devwidth = bits;
 380	}
 381	if (dtp->dev_flags & DEV_FLAGS_OUT) {	/* Destination out fifo */
 382		rv = dtp->dev_devwidth;
 383		dtp->dev_devwidth = bits;
 384	}
 385
 386	return rv;
 387}
 388EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
 389
 390/* Allocate a descriptor ring, initializing as much as possible. */
 391u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 392{
 393	int			i;
 394	u32			desc_base, srcid, destid;
 395	u32			cmd0, cmd1, src1, dest1;
 396	u32			src0, dest0;
 397	chan_tab_t		*ctp;
 398	dbdev_tab_t		*stp, *dtp;
 399	au1x_ddma_desc_t	*dp;
 400
 401	/*
 402	 * I guess we could check this to be within the
 403	 * range of the table......
 404	 */
 405	ctp = *((chan_tab_t **)chanid);
 406	stp = ctp->chan_src;
 407	dtp = ctp->chan_dest;
 408
 409	/*
 410	 * The descriptors must be 32-byte aligned.  There is a
 411	 * possibility the allocation will give us such an address,
 412	 * and if we try that first we are likely to not waste larger
 413	 * slabs of memory.
 414	 */
 415	desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
 416				       GFP_KERNEL|GFP_DMA);
 417	if (desc_base == 0)
 418		return 0;
 419
 420	if (desc_base & 0x1f) {
 421		/*
 422		 * Lost....do it again, allocate extra, and round
 423		 * the address base.
 424		 */
 425		kfree((const void *)desc_base);
 426		i = entries * sizeof(au1x_ddma_desc_t);
 427		i += (sizeof(au1x_ddma_desc_t) - 1);
 428		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
 429		if (desc_base == 0)
 430			return 0;
 431
 432		ctp->cdb_membase = desc_base;
 433		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
 434	} else
 435		ctp->cdb_membase = desc_base;
 436
 437	dp = (au1x_ddma_desc_t *)desc_base;
 438
 439	/* Keep track of the base descriptor. */
 440	ctp->chan_desc_base = dp;
 441
 442	/* Initialize the rings with as much information as we know. */
 443	srcid = stp->dev_id;
 444	destid = dtp->dev_id;
 445
 446	cmd0 = cmd1 = src1 = dest1 = 0;
 447	src0 = dest0 = 0;
 448
 449	cmd0 |= DSCR_CMD0_SID(srcid);
 450	cmd0 |= DSCR_CMD0_DID(destid);
 451	cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
 452	cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);
 453
 454	/* Is it mem to mem transfer? */
 455	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
 456	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
 457	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
 458	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
 459		cmd0 |= DSCR_CMD0_MEM;
 460
 461	switch (stp->dev_devwidth) {
 462	case 8:
 463		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
 464		break;
 465	case 16:
 466		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
 467		break;
 468	case 32:
 469	default:
 470		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
 471		break;
 472	}
 473
 474	switch (dtp->dev_devwidth) {
 475	case 8:
 476		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
 477		break;
 478	case 16:
 479		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
 480		break;
 481	case 32:
 482	default:
 483		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
 484		break;
 485	}
 486
 487	/*
 488	 * If the device is marked as an in/out FIFO, ensure it is
 489	 * set non-coherent.
 490	 */
 491	if (stp->dev_flags & DEV_FLAGS_IN)
 492		cmd0 |= DSCR_CMD0_SN;		/* Source in FIFO */
 493	if (dtp->dev_flags & DEV_FLAGS_OUT)
 494		cmd0 |= DSCR_CMD0_DN;		/* Destination out FIFO */
 495
 496	/*
 497	 * Set up source1.  For now, assume no stride and increment.
 498	 * A channel attribute update can change this later.
 499	 */
 500	switch (stp->dev_tsize) {
 501	case 1:
 502		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
 503		break;
 504	case 2:
 505		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
 506		break;
 507	case 4:
 508		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
 509		break;
 510	case 8:
 511	default:
 512		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
 513		break;
 514	}
 515
 516	/* If source input is FIFO, set static address. */
 517	if (stp->dev_flags & DEV_FLAGS_IN) {
 518		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
 519			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
 520		else
 521			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
 522	}
 523
 524	if (stp->dev_physaddr)
 525		src0 = stp->dev_physaddr;
 526
 527	/*
 528	 * Set up dest1.  For now, assume no stride and increment.
 529	 * A channel attribute update can change this later.
 530	 */
 531	switch (dtp->dev_tsize) {
 532	case 1:
 533		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
 534		break;
 535	case 2:
 536		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
 537		break;
 538	case 4:
 539		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
 540		break;
 541	case 8:
 542	default:
 543		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
 544		break;
 545	}
 546
 547	/* If destination output is FIFO, set static address. */
 548	if (dtp->dev_flags & DEV_FLAGS_OUT) {
 549		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
 550			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
 551		else
 552			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
 553	}
 554
 555	if (dtp->dev_physaddr)
 556		dest0 = dtp->dev_physaddr;
 557
 558#if 0
 559		printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
 560				  "source1:%x dest0:%x dest1:%x\n",
 561				  dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
 562				  src1, dest0, dest1);
 563#endif
 564	for (i = 0; i < entries; i++) {
 565		dp->dscr_cmd0 = cmd0;
 566		dp->dscr_cmd1 = cmd1;
 567		dp->dscr_source0 = src0;
 568		dp->dscr_source1 = src1;
 569		dp->dscr_dest0 = dest0;
 570		dp->dscr_dest1 = dest1;
 571		dp->dscr_stat = 0;
 572		dp->sw_context = 0;
 573		dp->sw_status = 0;
 574		dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
 575		dp++;
 576	}
 577
 578	/* Make last descriptor point to the first. */
 579	dp--;
 580	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
 581	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
 582
 583	return (u32)ctp->chan_desc_base;
 584}
 585EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
 586
 587/*
 588 * Put a source buffer into the DMA ring.
 589 * This updates the source pointer and byte count.  Normally used
 590 * for memory to fifo transfers.
 591 */
 592u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 593{
 594	chan_tab_t		*ctp;
 595	au1x_ddma_desc_t	*dp;
 596
 597	/*
 598	 * I guess we could check this to be within the
 599	 * range of the table......
 600	 */
 601	ctp = *(chan_tab_t **)chanid;
 602
 603	/*
 604	 * We should have multiple callers for a particular channel,
 605	 * an interrupt doesn't affect this pointer nor the descriptor,
 606	 * so no locking should be needed.
 607	 */
 608	dp = ctp->put_ptr;
 609
 610	/*
 611	 * If the descriptor is valid, we are way ahead of the DMA
 612	 * engine, so just return an error condition.
 613	 */
 614	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 615		return 0;
 616
 617	/* Load up buffer address and byte count. */
 618	dp->dscr_source0 = buf & ~0UL;
 619	dp->dscr_cmd1 = nbytes;
 620	/* Check flags */
 621	if (flags & DDMA_FLAGS_IE)
 622		dp->dscr_cmd0 |= DSCR_CMD0_IE;
 623	if (flags & DDMA_FLAGS_NOIE)
 624		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 625
 626	/*
 627	 * There is an erratum on certain Au1200/Au1550 revisions that could
 628	 * result in "stale" data being DMA'ed. It has to do with the snoop
 629	 * logic on the cache eviction buffer.  dma_default_coherent is set
 630	 * to false on these parts.
 631	 */
 632	if (!dma_default_coherent)
 633		dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
 634	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 635	wmb(); /* drain writebuffer */
 636	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
 637	ctp->chan_ptr->ddma_dbell = 0;
 638	wmb(); /* force doorbell write out to dma engine */
 639
 640	/* Get next descriptor pointer. */
 641	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 642
 643	/* Return something non-zero. */
 644	return nbytes;
 645}
 646EXPORT_SYMBOL(au1xxx_dbdma_put_source);
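
/*
 * Editor's note: transmit-side sketch (hypothetical).  put_source()
 * returns 0 when the ring is full, and performs the cache writeback
 * itself on non-coherent parts; txbuf and len are assumed names:
 *
 *	if (!au1xxx_dbdma_put_source(chan, virt_to_phys(txbuf), len,
 *				     DDMA_FLAGS_IE))
 *		return -EBUSY;
 *	au1xxx_dbdma_start(chan);
 */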
 647
 648	/* Put a destination buffer into the DMA ring.
 649	 * This updates the destination pointer and byte count.  Normally used
 650	 * to place an empty buffer into the ring for FIFO-to-memory transfers.
 651	 */
 652u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 653{
 654	chan_tab_t		*ctp;
 655	au1x_ddma_desc_t	*dp;
 656
 657	/* This could be validated against the range of the
 658	 * channel table, but we trust the caller for now.
 659	 */
 660	ctp = *((chan_tab_t **)chanid);
 661
 662	/* We shouldn't have multiple callers for a particular channel;
 663	 * an interrupt doesn't affect this pointer nor the descriptor,
 664	 * so no locking should be needed.
 665	 */
 666	dp = ctp->put_ptr;
 667
 668	/* If the descriptor is valid, we are way ahead of the DMA
 669	 * engine, so just return an error condition.
 670	 */
 671	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 672		return 0;
 673
 674	/* Check flags. */
 675	if (flags & DDMA_FLAGS_IE)
 676		dp->dscr_cmd0 |= DSCR_CMD0_IE;
 677	if (flags & DDMA_FLAGS_NOIE)
 678		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 679
 680	/* Load up buffer address and byte count. */
 681	dp->dscr_dest0 = buf & ~0UL;
 682	dp->dscr_cmd1 = nbytes;
 683
 684#if 0
 685	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
 686			  dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
 687			  dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
 688#endif
 689	/*
 690	 * There is an erratum on certain Au1200/Au1550 revisions that could
 691	 * result in "stale" data being DMA'ed. It has to do with the snoop
 692	 * logic on the cache eviction buffer.  dma_default_coherent is set
 693	 * to false on these parts.
 694	 */
 695	if (!dma_default_coherent)
 696		dma_cache_inv(KSEG0ADDR(buf), nbytes);
 697	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 698	wmb(); /* drain writebuffer */
 699	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
 700	ctp->chan_ptr->ddma_dbell = 0;
 701	wmb(); /* force doorbell write out to dma engine */
 702
 703	/* Get next descriptor pointer. */
 704	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 705
 706	/* Return something non-zero. */
 707	return nbytes;
 708}
 709EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
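
/*
 * Editor's note: receive-side sketch (hypothetical).  Pre-post a few
 * empty buffers so the FIFO always has somewhere to drain; NUM_RX,
 * RX_SIZE and rxbuf[] are assumed driver-side names:
 *
 *	for (i = 0; i < NUM_RX; i++)
 *		au1xxx_dbdma_put_dest(chan, virt_to_phys(rxbuf[i]),
 *				      RX_SIZE, DDMA_FLAGS_IE);
 *	au1xxx_dbdma_start(chan);
 */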
 710
 711	/*
 712	 * Get a destination buffer from the DMA ring.
 713	 * Normally used to get a full buffer from the ring during FIFO
 714	 * to memory transfers.  This does not set the valid bit; you must
 715	 * put another destination buffer in to keep the DMA going.
 716	 */
 717u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
 718{
 719	chan_tab_t		*ctp;
 720	au1x_ddma_desc_t	*dp;
 721	u32			rv;
 722
 723	/*
 724	 * This could be validated against the range of the
 725	 * channel table, but we trust the caller for now.
 726	 */
 727	ctp = *((chan_tab_t **)chanid);
 728
 729	/*
 730	 * We shouldn't have multiple callers for a particular channel;
 731	 * an interrupt doesn't affect this pointer nor the descriptor,
 732	 * so no locking should be needed.
 733	 */
 734	dp = ctp->get_ptr;
 735
 736	/*
 737	 * If the descriptor is still valid, the DMA engine has not
 738	 * finished filling it yet, so return an error condition.
 739	 */
 740	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 741		return 0;
 742
 743	/* Return buffer address and byte count. */
 744	*buf = (void *)(phys_to_virt(dp->dscr_dest0));
 745	*nbytes = dp->dscr_cmd1;
 746	rv = dp->dscr_stat;
 747
 748	/* Get next descriptor pointer. */
 749	ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 750
 751	/* Return something non-zero. */
 752	return rv;
 753}
 754EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
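
/*
 * Editor's note: completion sketch (hypothetical), typically run from
 * the channel callback.  Collect the filled buffer, consume it, then
 * re-post it to keep the ring supplied; my_consume and RX_SIZE are
 * assumed driver-side names:
 *
 *	void *buf;
 *	int len;
 *
 *	if (au1xxx_dbdma_get_dest(chan, &buf, &len)) {
 *		my_consume(buf, len);
 *		au1xxx_dbdma_put_dest(chan, virt_to_phys(buf),
 *				      RX_SIZE, DDMA_FLAGS_IE);
 *	}
 */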
 755
 756void au1xxx_dbdma_stop(u32 chanid)
 757{
 758	chan_tab_t	*ctp;
 759	au1x_dma_chan_t *cp;
 760	int halt_timeout = 0;
 761
 762	ctp = *((chan_tab_t **)chanid);
 763
 764	cp = ctp->chan_ptr;
 765	cp->ddma_cfg &= ~DDMA_CFG_EN;	/* Disable channel */
 766	wmb(); /* drain writebuffer */
 767	while (!(cp->ddma_stat & DDMA_STAT_H)) {
 768		udelay(1);
 769		halt_timeout++;
 770		if (halt_timeout > 100) {
 771			printk(KERN_WARNING "DMA channel won't halt\n");
 772			break;
 773		}
 774	}
 775	/* clear current desc valid and doorbell */
 776	cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
 777	wmb(); /* drain writebuffer */
 778}
 779EXPORT_SYMBOL(au1xxx_dbdma_stop);
 780
 781/*
 782 * Start using the current descriptor pointer.  If the DBDMA encounters
 783 * a non-valid descriptor, it will stop.  In this case, we can just
 784 * continue by adding a buffer to the list and starting again.
 785 */
 786void au1xxx_dbdma_start(u32 chanid)
 787{
 788	chan_tab_t	*ctp;
 789	au1x_dma_chan_t *cp;
 790
 791	ctp = *((chan_tab_t **)chanid);
 792	cp = ctp->chan_ptr;
 793	cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
 794	cp->ddma_cfg |= DDMA_CFG_EN;	/* Enable channel */
 795	wmb(); /* drain writebuffer */
 796	cp->ddma_dbell = 0;
 797	wmb(); /* drain writebuffer */
 798}
 799EXPORT_SYMBOL(au1xxx_dbdma_start);
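
/*
 * Editor's note: a stalled channel (engine halted on a non-valid
 * descriptor) is restarted simply by posting a buffer and calling
 * au1xxx_dbdma_start() again, e.g. (hypothetical):
 *
 *	au1xxx_dbdma_put_source(chan, virt_to_phys(buf), len,
 *				DDMA_FLAGS_IE);
 *	au1xxx_dbdma_start(chan);
 */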
 800
 801void au1xxx_dbdma_reset(u32 chanid)
 802{
 803	chan_tab_t		*ctp;
 804	au1x_ddma_desc_t	*dp;
 805
 806	au1xxx_dbdma_stop(chanid);
 807
 808	ctp = *((chan_tab_t **)chanid);
 809	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
 810
 811	/* Run through the descriptors and reset the valid indicator. */
 812	dp = ctp->chan_desc_base;
 813
 814	do {
 815		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
 816		/*
 817		 * Reset our software status -- this is used to determine
 818		 * if a descriptor is in use by upper level software, since
 819		 * posting can reset the 'V' bit.
 820		 */
 821		dp->sw_status = 0;
 822		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 823	} while (dp != ctp->chan_desc_base);
 824}
 825EXPORT_SYMBOL(au1xxx_dbdma_reset);
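
/*
 * Editor's note: au1xxx_dbdma_reset() stops the channel itself and
 * clears the valid bit on every descriptor, so buffers still queued
 * are forgotten.  A hypothetical recovery path re-posts them:
 *
 *	au1xxx_dbdma_reset(chan);
 *	au1xxx_dbdma_put_dest(chan, virt_to_phys(buf), RX_SIZE,
 *			      DDMA_FLAGS_IE);
 *	au1xxx_dbdma_start(chan);
 */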
 826
 827u32 au1xxx_get_dma_residue(u32 chanid)
 828{
 829	chan_tab_t	*ctp;
 830	au1x_dma_chan_t *cp;
 831	u32		rv;
 832
 833	ctp = *((chan_tab_t **)chanid);
 834	cp = ctp->chan_ptr;
 835
 836	/* This is only valid if the channel is stopped. */
 837	rv = cp->ddma_bytecnt;
 838	wmb(); /* drain writebuffer */
 839
 840	return rv;
 841}
 842EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);
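
/*
 * Editor's note: the residue is only meaningful while the channel is
 * halted, so a hypothetical caller pairs it with a stop; it reports
 * the bytes the engine has left to transfer:
 *
 *	au1xxx_dbdma_stop(chan);
 *	done = requested - au1xxx_get_dma_residue(chan);
 */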
 843
 844void au1xxx_dbdma_chan_free(u32 chanid)
 845{
 846	chan_tab_t	*ctp;
 847	dbdev_tab_t	*stp, *dtp;
 848
 849	ctp = *((chan_tab_t **)chanid);
 850	stp = ctp->chan_src;
 851	dtp = ctp->chan_dest;
 852
 853	au1xxx_dbdma_stop(chanid);
 854
 855	kfree((void *)ctp->cdb_membase);
 856
 857	stp->dev_flags &= ~DEV_FLAGS_INUSE;
 858	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
 859	chan_tab_ptr[ctp->chan_index] = NULL;
 860
 861	kfree(ctp);
 862}
 863EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
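
/*
 * Editor's note: teardown sketch (hypothetical).  chan_free() stops
 * the channel, frees the descriptor ring and marks both device-table
 * entries reusable; the driver only releases its own buffers:
 *
 *	au1xxx_dbdma_chan_free(chan);
 *	kfree(txbuf);
 */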
 864
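/*
 * Editor's note: all DBDMA channels share this one interrupt.  Each
 * invocation services the lowest-numbered pending channel (__ffs on
 * the global interrupt status), acks it, runs the callback that was
 * registered at channel-allocation time, then advances cur_ptr.
 */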
 865static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
 866{
 867	u32 intstat;
 868	u32 chan_index;
 869	chan_tab_t		*ctp;
 870	au1x_ddma_desc_t	*dp;
 871	au1x_dma_chan_t		*cp;
 872
 873	intstat = dbdma_gptr->ddma_intstat;
 874	wmb(); /* drain writebuffer */
 875	chan_index = __ffs(intstat);
 876
 877	ctp = chan_tab_ptr[chan_index];
 878	cp = ctp->chan_ptr;
 879	dp = ctp->cur_ptr;
 880
 881	/* Reset interrupt. */
 882	cp->ddma_irq = 0;
 883	wmb(); /* drain writebuffer */
 884
 885	if (ctp->chan_callback)
 886		ctp->chan_callback(irq, ctp->chan_callparam);
 887
 888	ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 889	return IRQ_HANDLED;
 890}
 891
 892void au1xxx_dbdma_dump(u32 chanid)
 893{
 894	chan_tab_t	 *ctp;
 895	au1x_ddma_desc_t *dp;
 896	dbdev_tab_t	 *stp, *dtp;
 897	au1x_dma_chan_t	 *cp;
 898	u32 i		 = 0;
 899
 900	ctp = *((chan_tab_t **)chanid);
 901	stp = ctp->chan_src;
 902	dtp = ctp->chan_dest;
 903	cp = ctp->chan_ptr;
 904
 905	printk(KERN_DEBUG "Chan %x, stp %x (dev %d)  dtp %x (dev %d)\n",
 906			  (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
 907			  dtp - dbdev_tab);
 908	printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
 909			  (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
 910			  (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));
 911
 912	printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
 913	printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
 914			  cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
 915	printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
 916			  cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
 917			  cp->ddma_bytecnt);
 918
 919	/* Run through the descriptors */
 920	dp = ctp->chan_desc_base;
 921
 922	do {
 923		printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
 924				  i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
 925		printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
 926				  dp->dscr_source0, dp->dscr_source1,
 927				  dp->dscr_dest0, dp->dscr_dest1);
 928		printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
 929				  dp->dscr_stat, dp->dscr_nxtptr);
 930		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 931	} while (dp != ctp->chan_desc_base);
 932}
 933
 934/* Put a descriptor into the DMA ring.
 935 * This updates the source/destination pointers and byte count.
 936 */
 937u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
 938{
 939	chan_tab_t *ctp;
 940	au1x_ddma_desc_t *dp;
 941	u32 nbytes = 0;
 942
 943	/*
 944	 * This could be validated against the range of the
 945	 * channel table, but we trust the caller for now.
 946	 */
 947	ctp = *((chan_tab_t **)chanid);
 948
 949	/*
 950	 * We shouldn't have multiple callers for a particular channel;
 951	 * an interrupt doesn't affect this pointer nor the descriptor,
 952	 * so no locking should be needed.
 953	 */
 954	dp = ctp->put_ptr;
 955
 956	/*
 957	 * If the descriptor is valid, we are way ahead of the DMA
 958	 * engine, so just return an error condition.
 959	 */
 960	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 961		return 0;
 962
 963	/* Load up buffer addresses and byte count. */
 964	dp->dscr_dest0 = dscr->dscr_dest0;
 965	dp->dscr_source0 = dscr->dscr_source0;
 966	dp->dscr_dest1 = dscr->dscr_dest1;
 967	dp->dscr_source1 = dscr->dscr_source1;
 968	dp->dscr_cmd1 = dscr->dscr_cmd1;
 969	nbytes = dscr->dscr_cmd1;
 970	/* Allow the caller to specify if an interrupt is generated */
 971	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 972	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
 973	ctp->chan_ptr->ddma_dbell = 0;
 974
 975	/* Get next descriptor pointer. */
 976	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 977
 978	/* Return something non-zero. */
 979	return nbytes;
 980}
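
/*
 * Editor's note: unlike put_source()/put_dest() above, this path does
 * no cache writeback and no wmb() before ringing the doorbell, so the
 * caller presumably has to hand in coherent (or already flushed)
 * buffers.  Hypothetical use, where src_pa/dst_pa/len are assumed:
 *
 *	au1x_ddma_desc_t d = { .dscr_source0 = src_pa,
 *			       .dscr_dest0 = dst_pa,
 *			       .dscr_cmd1 = len };
 *	au1xxx_dbdma_put_dscr(chan, &d);
 */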
 981
 982
 983static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];
 984
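/*
 * Editor's note: the raw offsets used below mirror au1x_dma_chan_t as
 * used elsewhere in this file (0x00 cfg, 0x04 desptr, 0x08 statptr,
 * 0x0c dbell, 0x10 irq, 0x14 stat), with channel register blocks
 * 0x100 apart; row 0 of the save area holds the four global
 * registers.  Suspend halts each channel the same way
 * au1xxx_dbdma_stop() does: clear the enable bit, poll the halt bit.
 */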
 985static int alchemy_dbdma_suspend(void)
 986{
 987	int i;
 988	void __iomem *addr;
 989
 990	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
 991	alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
 992	alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
 993	alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
 994	alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);
 995
 996	/* save channel configurations */
 997	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
 998	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
 999		alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
1000		alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
1001		alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
1002		alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
1003		alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
1004		alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);
1005
1006		/* halt channel */
1007		__raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
1008		wmb();
1009		while (!(__raw_readl(addr + 0x14) & 1))
1010			wmb();
1011
1012		addr += 0x100;	/* next channel base */
1013	}
1014	/* disable channel interrupts */
1015	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
1016	__raw_writel(0, addr + 0x0c);
1017	wmb();
1018
1019	return 0;
1020}
1021
1022static void alchemy_dbdma_resume(void)
1023{
1024	int i;
1025	void __iomem *addr;
1026
1027	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
1028	__raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
1029	__raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
1030	__raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
1031	__raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);
1032
1033	/* restore channel configurations */
1034	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
1035	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
1036		__raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
1037		__raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
1038		__raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
1039		__raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
1040		__raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
1041		__raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
1042		wmb();
1043		addr += 0x100;	/* next channel base */
1044	}
1045}
1046
1047static struct syscore_ops alchemy_dbdma_syscore_ops = {
1048	.suspend	= alchemy_dbdma_suspend,
1049	.resume		= alchemy_dbdma_resume,
1050};
1051
1052static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable)
1053{
1054	int ret;
1055
1056	dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
1057	if (!dbdev_tab)
1058		return -ENOMEM;
1059
1060	memcpy(dbdev_tab, idtable, 32 * sizeof(dbdev_tab_t));
1061	for (ret = 32; ret < DBDEV_TAB_SIZE; ret++)
1062		dbdev_tab[ret].dev_id = ~0;
1063
1064	dbdma_gptr->ddma_config = 0;
1065	dbdma_gptr->ddma_throttle = 0;
1066	dbdma_gptr->ddma_inten = 0xffff;
1067	wmb(); /* drain writebuffer */
1068
1069	ret = request_irq(irq, dbdma_interrupt, 0, "dbdma", (void *)dbdma_gptr);
1070	if (ret)
1071		printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
1072	else {
1073		dbdma_initialized = 1;
1074		register_syscore_ops(&alchemy_dbdma_syscore_ops);
1075	}
1076
1077	return ret;
1078}
1079
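/*
 * Editor's note: one device table per SoC is passed to dbdma_setup();
 * subsys_initcall() runs before regular device initcalls, so channels
 * are available by the time driver probe routines ask for them.
 */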
1080static int __init alchemy_dbdma_init(void)
1081{
1082	switch (alchemy_get_cputype()) {
1083	case ALCHEMY_CPU_AU1550:
1084		return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab);
1085	case ALCHEMY_CPU_AU1200:
1086		return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab);
1087	case ALCHEMY_CPU_AU1300:
1088		return dbdma_setup(AU1300_DDMA_INT, au1300_dbdev_tab);
1089	}
1090	return 0;
1091}
1092subsys_initcall(alchemy_dbdma_init);