v3.15
/*
 *
 * BRIEF MODULE DESCRIPTION
 *      A DMA channel allocator for Au1x00. API is modeled loosely off of
 *      linux/kernel/dma.c.
 *
 * Copyright 2000, 2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1000_dma.h>

/*
 * A note on resource allocation:
 *
 * All drivers needing DMA channels should allocate and release them
 * through the public routines `request_dma()' and `free_dma()'.
 *
 * In order to avoid problems, all processes should allocate resources in
 * the same sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the DMA, then the IRQ.
 * When releasing them, first release the IRQ, then release the DMA. The
 * main reason for this order is that, if you are requesting the DMA buffer
 * done interrupt, you won't know the irq number until the DMA channel is
 * returned from request_dma.
 */
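/*
 * Illustrative sketch (not part of the original file): the allocation order
 * described above, for a driver that requests the buffer-done IRQ itself by
 * passing a NULL irqhandler.  EXAMPLE_DMA_DEV_ID and the example_* names are
 * hypothetical placeholders, not identifiers from au1000_dma.h.
 */
#if 0
static irqreturn_t example_dma_done_irq(int irq, void *dev_id)
{
	/* service the buffer-done condition for the example device here */
	return IRQ_HANDLED;
}

static int example_setup(void)
{
	int chan, ret;

	/* allocate the DMA channel first; no irqhandler is passed here */
	chan = request_au1000_dma(EXAMPLE_DMA_DEV_ID, "example", NULL, 0, NULL);
	if (chan < 0)
		return chan;

	/* only now is the channel's IRQ number known */
	ret = request_irq(get_dma_chan(chan)->irq, example_dma_done_irq,
			  0, "example", NULL);
	if (ret) {
		free_au1000_dma(chan);
		return ret;
	}
	return chan;
}

static void example_teardown(int chan)
{
	/* release in reverse order: the IRQ first, then the DMA channel */
	free_irq(get_dma_chan(chan)->irq, NULL);
	free_au1000_dma(chan);
}
#endif
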
/* DMA Channel register block spacing */
#define DMA_CHANNEL_LEN		0x00000100

DEFINE_SPINLOCK(au1000_dma_spin_lock);

struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
      {.dev_id = -1,},
      {.dev_id = -1,},
      {.dev_id = -1,},
      {.dev_id = -1,},
      {.dev_id = -1,},
      {.dev_id = -1,},
      {.dev_id = -1,},
      {.dev_id = -1,}
};
EXPORT_SYMBOL(au1000_dma_table);

/* Device FIFO addresses and default DMA modes */
static const struct dma_dev {
	unsigned int fifo_addr;
	unsigned int dma_mode;
} dma_dev_table[DMA_NUM_DEV] = {
	{ AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 },		/* UART0_TX */
	{ AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR },	/* UART0_RX */
	{ 0, 0 },	/* DMA_REQ0 */
	{ 0, 0 },	/* DMA_REQ1 */
	{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 },		/* AC97 TX c */
	{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR },	/* AC97 RX c */
	{ AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC },	/* UART3_TX */
	{ AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */
	/* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */
	{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC},	/* I2S TX */
	{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */
};

int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
			 int length, int *eof, void *data)
{
	int i, len = 0;
	struct dma_chan *chan;

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
		chan = get_dma_chan(i);
		if (chan != NULL)
			len += sprintf(buf + len, "%2d: %s\n",
				       i, chan->dev_str);
	}

	if (fpos >= len) {
		*start = buf;
		*eof = 1;
		return 0;
	}
	*start = buf + fpos;
	len -= fpos;
	if (len > length)
		return length;
	*eof = 1;
	return len;
}

/* Device FIFO addresses and default DMA modes - 2nd bank */
static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
	{ AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 },		/* coherent */
	{ AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR },	/* coherent */
	{ AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 },		/* coherent */
	{ AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }	/* coherent */
};

void dump_au1000_dma_channel(unsigned int dmanr)
{
	struct dma_chan *chan;

	if (dmanr >= NUM_AU1000_DMA_CHANNELS)
		return;
	chan = &au1000_dma_table[dmanr];

	printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
	printk(KERN_INFO "  mode = 0x%08x\n",
	       au_readl(chan->io + DMA_MODE_SET));
	printk(KERN_INFO "  addr = 0x%08x\n",
	       au_readl(chan->io + DMA_PERIPHERAL_ADDR));
	printk(KERN_INFO "  start0 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER0_START));
	printk(KERN_INFO "  start1 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER1_START));
	printk(KERN_INFO "  count0 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER0_COUNT));
	printk(KERN_INFO "  count1 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER1_COUNT));
}

/*
 * Finds a free channel, and binds the requested device to it.
 * Returns the allocated channel number, or negative on error.
 * Requests the DMA done IRQ if irqhandler != NULL.
 */
int request_au1000_dma(int dev_id, const char *dev_str,
		       irq_handler_t irqhandler,
		       unsigned long irqflags,
		       void *irq_dev_id)
{
	struct dma_chan *chan;
	const struct dma_dev *dev;
	int i, ret;

	if (alchemy_get_cputype() == ALCHEMY_CPU_AU1100) {
		if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
			return -EINVAL;
	} else {
		if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
			return -EINVAL;
	}

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
		if (au1000_dma_table[i].dev_id < 0)
			break;

	if (i == NUM_AU1000_DMA_CHANNELS)
		return -ENODEV;

	chan = &au1000_dma_table[i];

	if (dev_id >= DMA_NUM_DEV) {
		dev_id -= DMA_NUM_DEV;
		dev = &dma_dev_table_bank2[dev_id];
	} else
		dev = &dma_dev_table[dev_id];

	if (irqhandler) {
		chan->irq_dev = irq_dev_id;
		ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
				  chan->irq_dev);
		if (ret) {
			chan->irq_dev = NULL;
			return ret;
		}
	} else {
		chan->irq_dev = NULL;
	}

	/* fill it in */
	chan->io = KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + i * DMA_CHANNEL_LEN;
	chan->dev_id = dev_id;
	chan->dev_str = dev_str;
	chan->fifo_addr = dev->fifo_addr;
	chan->mode = dev->dma_mode;

	/* initialize the channel before returning */
	init_dma(i);

	return i;
}
EXPORT_SYMBOL(request_au1000_dma);
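/*
 * Illustrative sketch (not part of the original file): requesting a channel
 * and letting request_au1000_dma() install the DMA-done handler itself.
 * EXAMPLE_DMA_DEV_ID, example_dma_done_irq and example_priv are hypothetical
 * placeholders, not identifiers from au1000_dma.h.
 */
#if 0
static int example_request_with_irq(void *example_priv)
{
	int chan;

	chan = request_au1000_dma(EXAMPLE_DMA_DEV_ID, "example-tx",
				  example_dma_done_irq, 0, example_priv);
	if (chan < 0)
		return chan;	/* -EINVAL, -ENODEV or a request_irq() error */

	/* the channel is already set up via init_dma(); its registers can be
	 * inspected with dump_au1000_dma_channel(chan) while debugging.
	 */
	return chan;
}
#endif
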
void free_au1000_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan) {
		printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
		return;
	}

	disable_dma(dmanr);
	if (chan->irq_dev)
		free_irq(chan->irq, chan->irq_dev);

	chan->irq_dev = NULL;
	chan->dev_id = -1;
}
EXPORT_SYMBOL(free_au1000_dma);

static int __init au1000_dma_init(void)
{
	int base, i;

	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1000:
		base = AU1000_DMA_INT_BASE;
		break;
	case ALCHEMY_CPU_AU1500:
		base = AU1500_DMA_INT_BASE;
		break;
	case ALCHEMY_CPU_AU1100:
		base = AU1100_DMA_INT_BASE;
		break;
	default:
		goto out;
	}

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
		au1000_dma_table[i].irq = base + i;

	printk(KERN_INFO "Alchemy DMA initialized\n");

out:
	return 0;
}
arch_initcall(au1000_dma_init);