Linux Audio

Check our new training course

Loading...
Note: File does not exist in v5.4.
   1/*
   2 * Copyright 2003-2006 Simtec Electronics
   3 *	Ben Dooks <ben@simtec.co.uk>
   4 *
   5 * S3C2410 DMA core
   6 *
   7 * http://armlinux.simtec.co.uk/
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12*/
  13
  14
  15#ifdef CONFIG_S3C2410_DMA_DEBUG
  16#define DEBUG
  17#endif
  18
  19#include <linux/module.h>
  20#include <linux/init.h>
  21#include <linux/sched.h>
  22#include <linux/spinlock.h>
  23#include <linux/interrupt.h>
  24#include <linux/syscore_ops.h>
  25#include <linux/slab.h>
  26#include <linux/errno.h>
  27#include <linux/io.h>
  28
  29#include <asm/irq.h>
  30#include <mach/hardware.h>
  31#include <mach/dma.h>
  32#include <mach/map.h>
  33
  34#include <plat/dma-s3c24xx.h>
  35#include <plat/regs-dma.h>
  36
/* io map for dma */
static void __iomem *dma_base;		/* virtual base of the DMA register block (set outside this chunk) */
static struct kmem_cache *dma_kmem;	/* slab cache for struct s3c2410_dma_buf allocations */

static int dma_channels;		/* number of DMA channels on this SoC */

static struct s3c2410_dma_selection dma_sel;	/* SoC/board channel selection info (dcon_mask, select op) */
  44
  45
/* debugging functions */

/* marker written into every live buffer; checked on free to catch
 * double-frees and corruption */
#define BUF_MAGIC (0xcafebabe)

#define dmawarn(fmt...) printk(KERN_DEBUG fmt)

/* address of a channel register, for use with the read/write helpers below */
#define dma_regaddr(chan, reg) ((chan)->regs + (reg))

/* register write helper; the disabled variant under #else adds a debug
 * trace of each write (flip the #if to enable it) */
#if 1
#define dma_wrreg(chan, reg, val) writel((val), (chan)->regs + (reg))
#else
static inline void
dma_wrreg(struct s3c2410_dma_chan *chan, int reg, unsigned long val)
{
	pr_debug("writing %08x to register %08x\n",(unsigned int)val,reg);
	writel(val, dma_regaddr(chan, reg));
}
#endif

/* register read helper */
#define dma_rdreg(chan, reg) readl((chan)->regs + (reg))
  66
/* captured register state for debug */

struct s3c2410_dma_regstate {
	unsigned long         dcsrc;		/* snapshot of DCSRC */
	unsigned long         disrc;		/* snapshot of DISRC */
	unsigned long         dstat;		/* snapshot of DSTAT */
	unsigned long         dcon;		/* snapshot of DCON */
	unsigned long         dmsktrig;		/* snapshot of DMASKTRIG */
};
  76
#ifdef CONFIG_S3C2410_DMA_DEBUG

/* dmadbg_showregs
 *
 * simple debug routine to print the current state of the dma registers
*/

/* snapshot the channel's hardware registers into *regs */
static void
dmadbg_capture(struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs)
{
	regs->dcsrc    = dma_rdreg(chan, S3C2410_DMA_DCSRC);
	regs->disrc    = dma_rdreg(chan, S3C2410_DMA_DISRC);
	regs->dstat    = dma_rdreg(chan, S3C2410_DMA_DSTAT);
	regs->dcon     = dma_rdreg(chan, S3C2410_DMA_DCON);
	regs->dmsktrig = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
}

/* print a previously captured register snapshot, tagged with the
 * calling function and line number */
static void
dmadbg_dumpregs(const char *fname, int line, struct s3c2410_dma_chan *chan,
		 struct s3c2410_dma_regstate *regs)
{
	printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
	       chan->number, fname, line,
	       regs->dcsrc, regs->disrc, regs->dstat, regs->dmsktrig,
	       regs->dcon);
}

/* dump both the software state (load state, buffer chain pointers) and
 * the hardware registers for a channel */
static void
dmadbg_showchan(const char *fname, int line, struct s3c2410_dma_chan *chan)
{
	struct s3c2410_dma_regstate state;

	dmadbg_capture(chan, &state);

	printk(KERN_DEBUG "dma%d: %s:%d: ls=%d, cur=%p, %p %p\n",
	       chan->number, fname, line, chan->load_state,
	       chan->curr, chan->next, chan->end);

	dmadbg_dumpregs(fname, line, chan, &state);
}

/* capture and dump just the hardware registers */
static void
dmadbg_showregs(const char *fname, int line, struct s3c2410_dma_chan *chan)
{
	struct s3c2410_dma_regstate state;

	dmadbg_capture(chan, &state);
	dmadbg_dumpregs(fname, line, chan, &state);
}

#define dbg_showregs(chan) dmadbg_showregs(__func__, __LINE__, (chan))
#define dbg_showchan(chan) dmadbg_showchan(__func__, __LINE__, (chan))
#else
/* debug disabled: the helpers compile away to nothing */
#define dbg_showregs(chan) do { } while(0)
#define dbg_showchan(chan) do { } while(0)
#endif /* CONFIG_S3C2410_DMA_DEBUG */
 133
 134/* s3c2410_dma_stats_timeout
 135 *
 136 * Update DMA stats from timeout info
 137*/
 138
 139static void
 140s3c2410_dma_stats_timeout(struct s3c2410_dma_stats *stats, int val)
 141{
 142	if (stats == NULL)
 143		return;
 144
 145	if (val > stats->timeout_longest)
 146		stats->timeout_longest = val;
 147	if (val < stats->timeout_shortest)
 148		stats->timeout_shortest = val;
 149
 150	stats->timeout_avg += val;
 151}
 152
/* s3c2410_dma_waitforload
 *
 * wait for the DMA engine to load a buffer, and update the state accordingly
*/

static int
s3c2410_dma_waitforload(struct s3c2410_dma_chan *chan, int line)
{
	int timeout = chan->load_timeout;
	int took;

	/* it is only valid to wait when we have just handed the engine a
	 * buffer; anything else indicates a caller bug (line identifies it) */
	if (chan->load_state != S3C2410_DMALOAD_1LOADED) {
		printk(KERN_ERR "dma%d: s3c2410_dma_waitforload() called in loadstate %d from line %d\n", chan->number, chan->load_state, line);
		return 0;
	}

	if (chan->stats != NULL)
		chan->stats->loads++;

	/* busy-wait until DSTAT reads non-zero in its low 20 bits (the
	 * shift discards the upper 12 bits - presumably masking down to
	 * the current transfer-count field; TODO confirm vs datasheet) */
	while (--timeout > 0) {
		if ((dma_rdreg(chan, S3C2410_DMA_DSTAT) << (32-20)) != 0) {
			took = chan->load_timeout - timeout;

			/* record how long the engine took to accept it */
			s3c2410_dma_stats_timeout(chan->stats, took);

			switch (chan->load_state) {
			case S3C2410_DMALOAD_1LOADED:
				chan->load_state = S3C2410_DMALOAD_1RUNNING;
				break;

			default:
				printk(KERN_ERR "dma%d: unknown load_state in s3c2410_dma_waitforload() %d\n", chan->number, chan->load_state);
			}

			return 1;	/* engine accepted the buffer */
		}
	}

	if (chan->stats != NULL) {
		chan->stats->timeout_failed++;
	}

	return 0;	/* timed out waiting */
}
 197
/* s3c2410_dma_loadbuffer
 *
 * load a buffer, and update the channel state
 *
 * Programs the channel's address register and DCON with the buffer's
 * physical address and transfer count, then advances the channel's
 * load_state. Returns 0 on success, -EINVAL for a NULL buffer.
*/

static inline int
s3c2410_dma_loadbuffer(struct s3c2410_dma_chan *chan,
		       struct s3c2410_dma_buf *buf)
{
	unsigned long reload;

	if (buf == NULL) {
		dmawarn("buffer is NULL\n");
		return -EINVAL;
	}

	pr_debug("s3c2410_chan_loadbuffer: loading buff %p (0x%08lx,0x%06x)\n",
		 buf, (unsigned long)buf->data, buf->size);

	/* check the state of the channel before we do anything */

	if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
		dmawarn("load_state is S3C2410_DMALOAD_1LOADED\n");
	}

	if (chan->load_state == S3C2410_DMALOAD_1LOADED_1RUNNING) {
		dmawarn("state is S3C2410_DMALOAD_1LOADED_1RUNNING\n");
	}

	/* it would seem sensible if we are the last buffer to not bother
	 * with the auto-reload bit, so that the DMA engine will not try
	 * and load another transfer after this one has finished...
	 */
	if (chan->load_state == S3C2410_DMALOAD_NONE) {
		pr_debug("load_state is none, checking for noreload (next=%p)\n",
			 buf->next);
		reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0;
	} else {
		//pr_debug("load_state is %d => autoreload\n", chan->load_state);
		reload = S3C2410_DCON_AUTORELOAD;
	}

	/* sanity check: warn if the buffer's physical address is not in
	 * the 0x3xxxxxxx range (presumably SDRAM on this SoC - TODO confirm) */
	if ((buf->data & 0xf0000000) != 0x30000000) {
		dmawarn("dmaload: buffer is %p\n", (void *)buf->data);
	}

	/* program the memory-side address, then DCON with the reload mode
	 * and the transfer count expressed in xfer_unit-sized units */
	writel(buf->data, chan->addr_reg);

	dma_wrreg(chan, S3C2410_DMA_DCON,
		  chan->dcon | reload | (buf->size/chan->xfer_unit));

	chan->next = buf->next;

	/* update the state of the channel */

	switch (chan->load_state) {
	case S3C2410_DMALOAD_NONE:
		chan->load_state = S3C2410_DMALOAD_1LOADED;
		break;

	case S3C2410_DMALOAD_1RUNNING:
		chan->load_state = S3C2410_DMALOAD_1LOADED_1RUNNING;
		break;

	default:
		dmawarn("dmaload: unknown state %d in loadbuffer\n",
			chan->load_state);
		break;
	}

	return 0;
}
 270
 271/* s3c2410_dma_call_op
 272 *
 273 * small routine to call the op routine with the given op if it has been
 274 * registered
 275*/
 276
 277static void
 278s3c2410_dma_call_op(struct s3c2410_dma_chan *chan, enum s3c2410_chan_op op)
 279{
 280	if (chan->op_fn != NULL) {
 281		(chan->op_fn)(chan, op);
 282	}
 283}
 284
 285/* s3c2410_dma_buffdone
 286 *
 287 * small wrapper to check if callback routine needs to be called, and
 288 * if so, call it
 289*/
 290
 291static inline void
 292s3c2410_dma_buffdone(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf,
 293		     enum s3c2410_dma_buffresult result)
 294{
 295#if 0
 296	pr_debug("callback_fn=%p, buf=%p, id=%p, size=%d, result=%d\n",
 297		 chan->callback_fn, buf, buf->id, buf->size, result);
 298#endif
 299
 300	if (chan->callback_fn != NULL) {
 301		(chan->callback_fn)(chan, buf->id, buf->size, result);
 302	}
 303}
 304
/* s3c2410_dma_start
 *
 * start a dma channel going
 *
 * Loads a buffer if none is loaded, enables the channel's IRQ, sets the
 * ON bit in DMASKTRIG, and tries to pre-load a second buffer so the
 * engine can chain transfers. Returns 0 on success (including if the
 * channel was already running), -EINVAL if nothing is queued.
*/

static int s3c2410_dma_start(struct s3c2410_dma_chan *chan)
{
	unsigned long tmp;
	unsigned long flags;

	pr_debug("s3c2410_start_dma: channel=%d\n", chan->number);

	local_irq_save(flags);

	if (chan->state == S3C2410_DMA_RUNNING) {
		/* already going: nothing to do */
		pr_debug("s3c2410_start_dma: already running (%d)\n", chan->state);
		local_irq_restore(flags);
		return 0;
	}

	chan->state = S3C2410_DMA_RUNNING;

	/* check whether there is anything to load, and if not, see
	 * if we can find anything to load
	 */

	if (chan->load_state == S3C2410_DMALOAD_NONE) {
		if (chan->next == NULL) {
			printk(KERN_ERR "dma%d: channel has nothing loaded\n",
			       chan->number);
			chan->state = S3C2410_DMA_IDLE;
			local_irq_restore(flags);
			return -EINVAL;
		}

		s3c2410_dma_loadbuffer(chan, chan->next);
	}

	dbg_showchan(chan);

	/* enable the channel */

	if (!chan->irq_enabled) {
		enable_irq(chan->irq);
		chan->irq_enabled = 1;
	}

	/* start the channel going */

	tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
	tmp &= ~S3C2410_DMASKTRIG_STOP;
	tmp |= S3C2410_DMASKTRIG_ON;
	dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);

	pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp);

#if 0
	/* the dma buffer loads should take care of clearing the AUTO
	 * reloading feature */
	tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
	tmp &= ~S3C2410_DCON_NORELOAD;
	dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
#endif

	/* tell the client's op handler that the channel has started */
	s3c2410_dma_call_op(chan, S3C2410_DMAOP_START);

	dbg_showchan(chan);

	/* if we've only loaded one buffer onto the channel, then chec
	 * to see if we have another, and if so, try and load it so when
	 * the first buffer is finished, the new one will be loaded onto
	 * the channel */

	if (chan->next != NULL) {
		if (chan->load_state == S3C2410_DMALOAD_1LOADED) {

			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				pr_debug("%s: buff not yet loaded, no more todo\n",
					 __func__);
			} else {
				chan->load_state = S3C2410_DMALOAD_1RUNNING;
				s3c2410_dma_loadbuffer(chan, chan->next);
			}

		} else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
			s3c2410_dma_loadbuffer(chan, chan->next);
		}
	}


	local_irq_restore(flags);

	return 0;
}
 399
 400/* s3c2410_dma_canload
 401 *
 402 * work out if we can queue another buffer into the DMA engine
 403*/
 404
 405static int
 406s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
 407{
 408	if (chan->load_state == S3C2410_DMALOAD_NONE ||
 409	    chan->load_state == S3C2410_DMALOAD_1RUNNING)
 410		return 1;
 411
 412	return 0;
 413}
 414
 415/* s3c2410_dma_enqueue
 416 *
 417 * queue an given buffer for dma transfer.
 418 *
 419 * id         the device driver's id information for this buffer
 420 * data       the physical address of the buffer data
 421 * size       the size of the buffer in bytes
 422 *
 423 * If the channel is not running, then the flag S3C2410_DMAF_AUTOSTART
 424 * is checked, and if set, the channel is started. If this flag isn't set,
 425 * then an error will be returned.
 426 *
 427 * It is possible to queue more than one DMA buffer onto a channel at
 428 * once, and the code will deal with the re-loading of the next buffer
 429 * when necessary.
 430*/
 431
 432int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
 433			dma_addr_t data, int size)
 434{
 435	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 436	struct s3c2410_dma_buf *buf;
 437	unsigned long flags;
 438
 439	if (chan == NULL)
 440		return -EINVAL;
 441
 442	pr_debug("%s: id=%p, data=%08x, size=%d\n",
 443		 __func__, id, (unsigned int)data, size);
 444
 445	buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
 446	if (buf == NULL) {
 447		pr_debug("%s: out of memory (%ld alloc)\n",
 448			 __func__, (long)sizeof(*buf));
 449		return -ENOMEM;
 450	}
 451
 452	//pr_debug("%s: new buffer %p\n", __func__, buf);
 453	//dbg_showchan(chan);
 454
 455	buf->next  = NULL;
 456	buf->data  = buf->ptr = data;
 457	buf->size  = size;
 458	buf->id    = id;
 459	buf->magic = BUF_MAGIC;
 460
 461	local_irq_save(flags);
 462
 463	if (chan->curr == NULL) {
 464		/* we've got nothing loaded... */
 465		pr_debug("%s: buffer %p queued onto empty channel\n",
 466			 __func__, buf);
 467
 468		chan->curr = buf;
 469		chan->end  = buf;
 470		chan->next = NULL;
 471	} else {
 472		pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
 473			 chan->number, __func__, buf);
 474
 475		if (chan->end == NULL) {
 476			pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
 477				 chan->number, __func__, chan);
 478		} else {
 479			chan->end->next = buf;
 480			chan->end = buf;
 481		}
 482	}
 483
 484	/* if necessary, update the next buffer field */
 485	if (chan->next == NULL)
 486		chan->next = buf;
 487
 488	/* check to see if we can load a buffer */
 489	if (chan->state == S3C2410_DMA_RUNNING) {
 490		if (chan->load_state == S3C2410_DMALOAD_1LOADED && 1) {
 491			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
 492				printk(KERN_ERR "dma%d: loadbuffer:"
 493				       "timeout loading buffer\n",
 494				       chan->number);
 495				dbg_showchan(chan);
 496				local_irq_restore(flags);
 497				return -EINVAL;
 498			}
 499		}
 500
 501		while (s3c2410_dma_canload(chan) && chan->next != NULL) {
 502			s3c2410_dma_loadbuffer(chan, chan->next);
 503		}
 504	} else if (chan->state == S3C2410_DMA_IDLE) {
 505		if (chan->flags & S3C2410_DMAF_AUTOSTART) {
 506			s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL,
 507					 S3C2410_DMAOP_START);
 508		}
 509	}
 510
 511	local_irq_restore(flags);
 512	return 0;
 513}
 514
 515EXPORT_SYMBOL(s3c2410_dma_enqueue);
 516
 517static inline void
 518s3c2410_dma_freebuf(struct s3c2410_dma_buf *buf)
 519{
 520	int magicok = (buf->magic == BUF_MAGIC);
 521
 522	buf->magic = -1;
 523
 524	if (magicok) {
 525		kmem_cache_free(dma_kmem, buf);
 526	} else {
 527		printk("s3c2410_dma_freebuf: buff %p with bad magic\n", buf);
 528	}
 529}
 530
 531/* s3c2410_dma_lastxfer
 532 *
 533 * called when the system is out of buffers, to ensure that the channel
 534 * is prepared for shutdown.
 535*/
 536
 537static inline void
 538s3c2410_dma_lastxfer(struct s3c2410_dma_chan *chan)
 539{
 540#if 0
 541	pr_debug("dma%d: s3c2410_dma_lastxfer: load_state %d\n",
 542		 chan->number, chan->load_state);
 543#endif
 544
 545	switch (chan->load_state) {
 546	case S3C2410_DMALOAD_NONE:
 547		break;
 548
 549	case S3C2410_DMALOAD_1LOADED:
 550		if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
 551				/* flag error? */
 552			printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
 553			       chan->number, __func__);
 554			return;
 555		}
 556		break;
 557
 558	case S3C2410_DMALOAD_1LOADED_1RUNNING:
 559		/* I believe in this case we do not have anything to do
 560		 * until the next buffer comes along, and we turn off the
 561		 * reload */
 562		return;
 563
 564	default:
 565		pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n",
 566			 chan->number, chan->load_state);
 567		return;
 568
 569	}
 570
 571	/* hopefully this'll shut the damned thing up after the transfer... */
 572	dma_wrreg(chan, S3C2410_DMA_DCON, chan->dcon | S3C2410_DCON_NORELOAD);
 573}
 574
 575
 576#define dmadbg2(x...)
 577
/* s3c2410_dma_irq
 *
 * per-channel interrupt handler: advance the load state machine, complete
 * (callback + free) the finished buffer, then either load the next queued
 * buffer or wind the channel down via lastxfer/STOP.
*/
static irqreturn_t
s3c2410_dma_irq(int irq, void *devpw)
{
	struct s3c2410_dma_chan *chan = (struct s3c2410_dma_chan *)devpw;
	struct s3c2410_dma_buf  *buf;

	buf = chan->curr;

	dbg_showchan(chan);

	/* modify the channel state */

	switch (chan->load_state) {
	case S3C2410_DMALOAD_1RUNNING:
		/* TODO - if we are running only one buffer, we probably
		 * want to reload here, and then worry about the buffer
		 * callback */

		chan->load_state = S3C2410_DMALOAD_NONE;
		break;

	case S3C2410_DMALOAD_1LOADED:
		/* iirc, we should go back to NONE loaded here, we
		 * had a buffer, and it was never verified as being
		 * loaded.
		 */

		chan->load_state = S3C2410_DMALOAD_NONE;
		break;

	case S3C2410_DMALOAD_1LOADED_1RUNNING:
		/* we'll worry about checking to see if another buffer is
		 * ready after we've called back the owner. This should
		 * ensure we do not wait around too long for the DMA
		 * engine to start the next transfer
		 */

		chan->load_state = S3C2410_DMALOAD_1LOADED;
		break;

	case S3C2410_DMALOAD_NONE:
		printk(KERN_ERR "dma%d: IRQ with no loaded buffer?\n",
		       chan->number);
		break;

	default:
		printk(KERN_ERR "dma%d: IRQ in invalid load_state %d\n",
		       chan->number, chan->load_state);
		break;
	}

	if (buf != NULL) {
		/* update the chain to make sure that if we load any more
		 * buffers when we call the callback function, things should
		 * work properly */

		chan->curr = buf->next;
		buf->next  = NULL;

		/* corrupted or already-freed buffer: bail out rather than
		 * calling the client back or freeing it again */
		if (buf->magic != BUF_MAGIC) {
			printk(KERN_ERR "dma%d: %s: buf %p incorrect magic\n",
			       chan->number, __func__, buf);
			return IRQ_HANDLED;
		}

		s3c2410_dma_buffdone(chan, buf, S3C2410_RES_OK);

		/* free resouces */
		s3c2410_dma_freebuf(buf);
	} else {
	}

	/* only reload if the channel is still running... our buffer done
	 * routine may have altered the state by requesting the dma channel
	 * to stop or shutdown... */

	/* todo: check that when the channel is shut-down from inside this
	 * function, we cope with unsetting reload, etc */

	if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
		unsigned long flags;

		switch (chan->load_state) {
		case S3C2410_DMALOAD_1RUNNING:
			/* don't need to do anything for this state */
			break;

		case S3C2410_DMALOAD_NONE:
			/* can load buffer immediately */
			break;

		case S3C2410_DMALOAD_1LOADED:
			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				/* flag error? */
				printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
				       chan->number, __func__);
				return IRQ_HANDLED;
			}

			break;

		case S3C2410_DMALOAD_1LOADED_1RUNNING:
			goto no_load;

		default:
			printk(KERN_ERR "dma%d: unknown load_state in irq, %d\n",
			       chan->number, chan->load_state);
			return IRQ_HANDLED;
		}

		local_irq_save(flags);
		s3c2410_dma_loadbuffer(chan, chan->next);
		local_irq_restore(flags);
	} else {
		/* nothing queued: prepare the channel for shutdown */
		s3c2410_dma_lastxfer(chan);

		/* see if we can stop this channel.. */
		if (chan->load_state == S3C2410_DMALOAD_NONE) {
			pr_debug("dma%d: end of transfer, stopping channel (%ld)\n",
				 chan->number, jiffies);
			s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL,
					 S3C2410_DMAOP_STOP);
		}
	}

 no_load:
	return IRQ_HANDLED;
}
 706
 707static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel);
 708
 709/* s3c2410_request_dma
 710 *
 711 * get control of an dma channel
 712*/
 713
int s3c2410_dma_request(enum dma_ch channel,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c2410_dma_chan *chan;
	unsigned long flags;
	int err;

	pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
		 channel, client->name, dev);

	local_irq_save(flags);

	/* map the request onto a free hardware channel */
	chan = s3c2410_dma_map_channel(channel);
	if (chan == NULL) {
		local_irq_restore(flags);
		return -EBUSY;
	}

	dbg_showchan(chan);

	chan->client = client;
	chan->in_use = 1;

	if (!chan->irq_claimed) {
		pr_debug("dma%d: %s : requesting irq %d\n",
			 channel, __func__, chan->irq);

		/* mark the irq claimed before re-enabling interrupts, so
		 * the channel reads as taken while request_irq() may sleep */
		chan->irq_claimed = 1;
		local_irq_restore(flags);

		err = request_irq(chan->irq, s3c2410_dma_irq, 0,
				  client->name, (void *)chan);

		local_irq_save(flags);

		if (err) {
			/* roll back the claim on failure */
			chan->in_use = 0;
			chan->irq_claimed = 0;
			local_irq_restore(flags);

			printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n",
			       client->name, chan->irq, chan->number);
			return err;
		}

		chan->irq_enabled = 1;
	}

	local_irq_restore(flags);

	/* need to setup */

	pr_debug("%s: channel initialised, %p\n", __func__, chan);

	/* the returned cookie (number | DMACH_LOW_LEVEL) is what callers
	 * pass to the other s3c2410_dma_* entry points */
	return chan->number | DMACH_LOW_LEVEL;
}
 771
 772EXPORT_SYMBOL(s3c2410_dma_request);
 773
 774/* s3c2410_dma_free
 775 *
 776 * release the given channel back to the system, will stop and flush
 777 * any outstanding transfers, and ensure the channel is ready for the
 778 * next claimant.
 779 *
 780 * Note, although a warning is currently printed if the freeing client
 781 * info is not the same as the registrant's client info, the free is still
 782 * allowed to go through.
 783*/
 784
int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	local_irq_save(flags);

	/* warn (but still allow) a free from a client other than the
	 * one that registered the channel */
	if (chan->client != client) {
		printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
		       channel, chan->client, client);
	}

	/* sort out stopping and freeing the channel */

	if (chan->state != S3C2410_DMA_IDLE) {
		pr_debug("%s: need to stop dma channel %p\n",
		       __func__, chan);

		/* possibly flush the channel */
		s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STOP);
	}

	chan->client = NULL;
	chan->in_use = 0;

	if (chan->irq_claimed)
		free_irq(chan->irq, (void *)chan);

	chan->irq_claimed = 0;

	/* channels claimed via DMACH_LOW_LEVEL are not in the map table */
	if (!(channel & DMACH_LOW_LEVEL))
		s3c_dma_chan_map[channel] = NULL;

	local_irq_restore(flags);

	return 0;
}
 825
 826EXPORT_SYMBOL(s3c2410_dma_free);
 827
/* s3c2410_dma_dostop
 *
 * stop the channel: notify the client's op handler, set the STOP bit in
 * DMASKTRIG, and mark the channel idle with nothing loaded.
*/
static int s3c2410_dma_dostop(struct s3c2410_dma_chan *chan)
{
	unsigned long flags;
	unsigned long tmp;

	pr_debug("%s:\n", __func__);

	dbg_showchan(chan);

	local_irq_save(flags);

	s3c2410_dma_call_op(chan,  S3C2410_DMAOP_STOP);

	tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
	tmp |= S3C2410_DMASKTRIG_STOP;
	//tmp &= ~S3C2410_DMASKTRIG_ON;
	dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);

#if 0
	/* should also clear interrupts, according to WinCE BSP */
	tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
	tmp |= S3C2410_DCON_NORELOAD;
	dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
#endif

	/* should stop do this, or should we wait for flush? */
	chan->state      = S3C2410_DMA_IDLE;
	chan->load_state = S3C2410_DMALOAD_NONE;

	local_irq_restore(flags);

	return 0;
}
 861
 862static void s3c2410_dma_waitforstop(struct s3c2410_dma_chan *chan)
 863{
 864	unsigned long tmp;
 865	unsigned int timeout = 0x10000;
 866
 867	while (timeout-- > 0) {
 868		tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
 869
 870		if (!(tmp & S3C2410_DMASKTRIG_ON))
 871			return;
 872	}
 873
 874	pr_debug("dma%d: failed to stop?\n", chan->number);
 875}
 876
 877
 878/* s3c2410_dma_flush
 879 *
 880 * stop the channel, and remove all current and pending transfers
 881*/
 882
static int s3c2410_dma_flush(struct s3c2410_dma_chan *chan)
{
	struct s3c2410_dma_buf *buf, *next;
	unsigned long flags;

	pr_debug("%s: chan %p (%d)\n", __func__, chan, chan->number);

	dbg_showchan(chan);

	local_irq_save(flags);

	if (chan->state != S3C2410_DMA_IDLE) {
		pr_debug("%s: stopping channel...\n", __func__ );
		s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP);
	}

	/* start walking the queue from the in-flight buffer if there is
	 * one, otherwise from the first pending buffer */
	buf = chan->curr;
	if (buf == NULL)
		buf = chan->next;

	chan->curr = chan->next = chan->end = NULL;

	if (buf != NULL) {
		/* complete every queued buffer with RES_ABORT, then free it */
		for ( ; buf != NULL; buf = next) {
			next = buf->next;

			pr_debug("%s: free buffer %p, next %p\n",
			       __func__, buf, buf->next);

			s3c2410_dma_buffdone(chan, buf, S3C2410_RES_ABORT);
			s3c2410_dma_freebuf(buf);
		}
	}

	dbg_showregs(chan);

	/* wait for the hardware to actually go quiescent */
	s3c2410_dma_waitforstop(chan);

#if 0
	/* should also clear interrupts, according to WinCE BSP */
	{
		unsigned long tmp;

		tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
		tmp |= S3C2410_DCON_NORELOAD;
		dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
	}
#endif

	dbg_showregs(chan);

	local_irq_restore(flags);

	return 0;
}
 938
/* s3c2410_dma_started
 *
 * handle the S3C2410_DMAOP_STARTED operation: try to pre-load the next
 * queued buffer. NOTE(review): this duplicates the buffer pre-load logic
 * at the tail of s3c2410_dma_start - candidate for a shared helper.
*/
static int s3c2410_dma_started(struct s3c2410_dma_chan *chan)
{
	unsigned long flags;

	local_irq_save(flags);

	dbg_showchan(chan);

	/* if we've only loaded one buffer onto the channel, then chec
	 * to see if we have another, and if so, try and load it so when
	 * the first buffer is finished, the new one will be loaded onto
	 * the channel */

	if (chan->next != NULL) {
		if (chan->load_state == S3C2410_DMALOAD_1LOADED) {

			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				pr_debug("%s: buff not yet loaded, no more todo\n",
					 __func__);
			} else {
				chan->load_state = S3C2410_DMALOAD_1RUNNING;
				s3c2410_dma_loadbuffer(chan, chan->next);
			}

		} else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
			s3c2410_dma_loadbuffer(chan, chan->next);
		}
	}


	local_irq_restore(flags);

	return 0;

}
 974
 975int
 976s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
 977{
 978	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 979
 980	if (chan == NULL)
 981		return -EINVAL;
 982
 983	switch (op) {
 984	case S3C2410_DMAOP_START:
 985		return s3c2410_dma_start(chan);
 986
 987	case S3C2410_DMAOP_STOP:
 988		return s3c2410_dma_dostop(chan);
 989
 990	case S3C2410_DMAOP_PAUSE:
 991	case S3C2410_DMAOP_RESUME:
 992		return -ENOENT;
 993
 994	case S3C2410_DMAOP_FLUSH:
 995		return s3c2410_dma_flush(chan);
 996
 997	case S3C2410_DMAOP_STARTED:
 998		return s3c2410_dma_started(chan);
 999
1000	case S3C2410_DMAOP_TIMEOUT:
1001		return 0;
1002
1003	}
1004
1005	return -ENOENT;      /* unknown, don't bother */
1006}
1007
1008EXPORT_SYMBOL(s3c2410_dma_ctrl);
1009
1010/* DMA configuration for each channel
1011 *
1012 * DISRCC -> source of the DMA (AHB,APB)
1013 * DISRC  -> source address of the DMA
1014 * DIDSTC -> destination of the DMA (AHB,APD)
1015 * DIDST  -> destination address of the DMA
1016*/
1017
1018/* s3c2410_dma_config
1019 *
1020 * xfersize:     size of unit in bytes (1,2,4)
1021*/
1022
int s3c2410_dma_config(enum dma_ch channel,
		       int xferunit)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	unsigned int dcon;

	pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit);

	if (chan == NULL)
		return -EINVAL;

	/* keep only the DCON bits the SoC-specific selection code permits */
	dcon = chan->dcon & dma_sel.dcon_mask;
	pr_debug("%s: dcon is %08x\n", __func__, dcon);

	/* pick handshake/clock-sync bits per peripheral; the audio
	 * channels deliberately share the default arm (handshake + PCLK) */
	switch (chan->req_ch) {
	case DMACH_I2S_IN:
	case DMACH_I2S_OUT:
	case DMACH_PCM_IN:
	case DMACH_PCM_OUT:
	case DMACH_MIC_IN:
	default:
		dcon |= S3C2410_DCON_HANDSHAKE;
		dcon |= S3C2410_DCON_SYNC_PCLK;
		break;

	case DMACH_SDI:
		/* note, ensure if need HANDSHAKE or not */
		dcon |= S3C2410_DCON_SYNC_PCLK;
		break;

	case DMACH_XD0:
	case DMACH_XD1:
		dcon |= S3C2410_DCON_HANDSHAKE;
		dcon |= S3C2410_DCON_SYNC_HCLK;
		break;
	}

	/* transfer unit: 1, 2 or 4 bytes per transfer only */
	switch (xferunit) {
	case 1:
		dcon |= S3C2410_DCON_BYTE;
		break;

	case 2:
		dcon |= S3C2410_DCON_HALFWORD;
		break;

	case 4:
		dcon |= S3C2410_DCON_WORD;
		break;

	default:
		pr_debug("%s: bad transfer size %d\n", __func__, xferunit);
		return -EINVAL;
	}

	dcon |= S3C2410_DCON_HWTRIG;
	dcon |= S3C2410_DCON_INTREQ;

	pr_debug("%s: dcon now %08x\n", __func__, dcon);

	/* cache the configuration; it is programmed into the hardware at
	 * buffer-load time (see s3c2410_dma_loadbuffer) */
	chan->dcon = dcon;
	chan->xfer_unit = xferunit;

	return 0;
}
1088
1089EXPORT_SYMBOL(s3c2410_dma_config);
1090
1091
1092/* s3c2410_dma_devconfig
1093 *
1094 * configure the dma source/destination hardware type and address
1095 *
1096 * source:    DMA_FROM_DEVICE: source is hardware
1097 *            DMA_TO_DEVICE: source is memory
1098 *
1099 * devaddr:   physical address of the source
1100*/
1101
int s3c2410_dma_devconfig(enum dma_ch channel,
			  enum dma_data_direction source,
			  unsigned long devaddr)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	unsigned int hwcfg;

	if (chan == NULL)
		return -EINVAL;

	pr_debug("%s: source=%d, devaddr=%08lx\n",
		 __func__, (int)source, devaddr);

	/* remember the settings so they can be re-applied on resume */
	chan->source = source;
	chan->dev_addr = devaddr;

	/* bus attachment of the peripheral: XD channels are on AHB,
	 * everything else on APB */
	switch (chan->req_ch) {
	case DMACH_XD0:
	case DMACH_XD1:
		hwcfg = 0; /* AHB */
		break;

	default:
		hwcfg = S3C2410_DISRCC_APB;
	}

	/* always assume our peripheral desintation is a fixed
	 * address in memory. */
	 hwcfg |= S3C2410_DISRCC_INC;

	switch (source) {
	case DMA_FROM_DEVICE:
		/* source is hardware */
		pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n",
			 __func__, devaddr, hwcfg);
		dma_wrreg(chan, S3C2410_DMA_DISRCC, hwcfg & 3);
		dma_wrreg(chan, S3C2410_DMA_DISRC,  devaddr);
		dma_wrreg(chan, S3C2410_DMA_DIDSTC, (0<<1) | (0<<0));

		/* buffer loads will write the memory address to DIDST */
		chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST);
		break;

	case DMA_TO_DEVICE:
		/* source is memory */
		pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n",
			 __func__, devaddr, hwcfg);
		dma_wrreg(chan, S3C2410_DMA_DISRCC, (0<<1) | (0<<0));
		dma_wrreg(chan, S3C2410_DMA_DIDST,  devaddr);
		dma_wrreg(chan, S3C2410_DMA_DIDSTC, hwcfg & 3);

		/* buffer loads will write the memory address to DISRC */
		chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DISRC);
		break;

	default:
		printk(KERN_ERR "dma%d: invalid source type (%d)\n",
		       channel, source);

		return -EINVAL;
	}

	return 0;
}
1164
1165EXPORT_SYMBOL(s3c2410_dma_devconfig);
1166
1167/* s3c2410_dma_getposition
1168 *
1169 * returns the current transfer points for the dma source and destination
1170*/
1171
1172int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst)
1173{
1174	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
1175
1176	if (chan == NULL)
1177		return -EINVAL;
1178
1179	if (src != NULL)
1180 		*src = dma_rdreg(chan, S3C2410_DMA_DCSRC);
1181
1182 	if (dst != NULL)
1183 		*dst = dma_rdreg(chan, S3C2410_DMA_DCDST);
1184
1185 	return 0;
1186}
1187
1188EXPORT_SYMBOL(s3c2410_dma_getposition);
1189
1190/* system core operations */
1191
1192#ifdef CONFIG_PM
1193
1194static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp)
1195{
1196	printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
1197
1198	if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) {
1199		/* the dma channel is still working, which is probably
1200		 * a bad thing to do over suspend/resume. We stop the
1201		 * channel and assume that the client is either going to
1202		 * retry after resume, or that it is broken.
1203		 */
1204
1205		printk(KERN_INFO "dma: stopping channel %d due to suspend\n",
1206		       cp->number);
1207
1208		s3c2410_dma_dostop(cp);
1209	}
1210}
1211
1212static int s3c2410_dma_suspend(void)
1213{
1214	struct s3c2410_dma_chan *cp = s3c2410_chans;
1215	int channel;
1216
1217	for (channel = 0; channel < dma_channels; cp++, channel++)
1218		s3c2410_dma_suspend_chan(cp);
1219
1220	return 0;
1221}
1222
1223static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
1224{
1225	unsigned int no = cp->number | DMACH_LOW_LEVEL;
1226
1227	/* restore channel's hardware configuration */
1228
1229	if (!cp->in_use)
1230		return;
1231
1232	printk(KERN_INFO "dma%d: restoring configuration\n", cp->number);
1233
1234	s3c2410_dma_config(no, cp->xfer_unit);
1235	s3c2410_dma_devconfig(no, cp->source, cp->dev_addr);
1236
1237	/* re-select the dma source for this channel */
1238
1239	if (cp->map != NULL)
1240		dma_sel.select(cp, cp->map);
1241}
1242
1243static void s3c2410_dma_resume(void)
1244{
1245	struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1;
1246	int channel;
1247
1248	for (channel = dma_channels - 1; channel >= 0; cp--, channel--)
1249		s3c2410_dma_resume_chan(cp);
1250}
1251
1252#else
1253#define s3c2410_dma_suspend NULL
1254#define s3c2410_dma_resume  NULL
1255#endif /* CONFIG_PM */
1256
/* hook the dma core into the system suspend/resume path; both handlers
 * are defined to NULL when CONFIG_PM is disabled (see above).
 *
 * NOTE(review): no extern declaration is visible from here - if nothing
 * outside this file references this symbol it should be made static.
 */
struct syscore_ops dma_syscore_ops = {
	.suspend	= s3c2410_dma_suspend,
	.resume		= s3c2410_dma_resume,
};
1261
1262/* kmem cache implementation */
1263
1264static void s3c2410_dma_cache_ctor(void *p)
1265{
1266	memset(p, 0, sizeof(struct s3c2410_dma_buf));
1267}
1268
1269/* initialisation code */
1270
1271static int __init s3c24xx_dma_syscore_init(void)
1272{
1273	register_syscore_ops(&dma_syscore_ops);
1274
1275	return 0;
1276}
1277
1278late_initcall(s3c24xx_dma_syscore_init);
1279
1280int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq,
1281			    unsigned int stride)
1282{
1283	struct s3c2410_dma_chan *cp;
1284	int channel;
1285	int ret;
1286
1287	printk("S3C24XX DMA Driver, Copyright 2003-2006 Simtec Electronics\n");
1288
1289	dma_channels = channels;
1290
1291	dma_base = ioremap(S3C24XX_PA_DMA, stride * channels);
1292	if (dma_base == NULL) {
1293		printk(KERN_ERR "dma failed to remap register block\n");
1294		return -ENOMEM;
1295	}
1296
1297	dma_kmem = kmem_cache_create("dma_desc",
1298				     sizeof(struct s3c2410_dma_buf), 0,
1299				     SLAB_HWCACHE_ALIGN,
1300				     s3c2410_dma_cache_ctor);
1301
1302	if (dma_kmem == NULL) {
1303		printk(KERN_ERR "dma failed to make kmem cache\n");
1304		ret = -ENOMEM;
1305		goto err;
1306	}
1307
1308	for (channel = 0; channel < channels;  channel++) {
1309		cp = &s3c2410_chans[channel];
1310
1311		memset(cp, 0, sizeof(struct s3c2410_dma_chan));
1312
1313		/* dma channel irqs are in order.. */
1314		cp->number = channel;
1315		cp->irq    = channel + irq;
1316		cp->regs   = dma_base + (channel * stride);
1317
1318		/* point current stats somewhere */
1319		cp->stats  = &cp->stats_store;
1320		cp->stats_store.timeout_shortest = LONG_MAX;
1321
1322		/* basic channel configuration */
1323
1324		cp->load_timeout = 1<<18;
1325
1326		printk("DMA channel %d at %p, irq %d\n",
1327		       cp->number, cp->regs, cp->irq);
1328	}
1329
1330	return 0;
1331
1332 err:
1333	kmem_cache_destroy(dma_kmem);
1334	iounmap(dma_base);
1335	dma_base = NULL;
1336	return ret;
1337}
1338
1339int __init s3c2410_dma_init(void)
1340{
1341	return s3c24xx_dma_init(4, IRQ_DMA0, 0x40);
1342}
1343
1344static inline int is_channel_valid(unsigned int channel)
1345{
1346	return (channel & DMA_CH_VALID);
1347}
1348
1349static struct s3c24xx_dma_order *dma_order;
1350
1351
1352/* s3c2410_dma_map_channel()
1353 *
1354 * turn the virtual channel number into a real, and un-used hardware
1355 * channel.
1356 *
1357 * first, try the dma ordering given to us by either the relevant
1358 * dma code, or the board. Then just find the first usable free
1359 * channel
1360*/
1361
1362static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel)
1363{
1364	struct s3c24xx_dma_order_ch *ord = NULL;
1365	struct s3c24xx_dma_map *ch_map;
1366	struct s3c2410_dma_chan *dmach;
1367	int ch;
1368
1369	if (dma_sel.map == NULL || channel > dma_sel.map_size)
1370		return NULL;
1371
1372	ch_map = dma_sel.map + channel;
1373
1374	/* first, try the board mapping */
1375
1376	if (dma_order) {
1377		ord = &dma_order->channels[channel];
1378
1379		for (ch = 0; ch < dma_channels; ch++) {
1380			int tmp;
1381			if (!is_channel_valid(ord->list[ch]))
1382				continue;
1383
1384			tmp = ord->list[ch] & ~DMA_CH_VALID;
1385			if (s3c2410_chans[tmp].in_use == 0) {
1386				ch = tmp;
1387				goto found;
1388			}
1389		}
1390
1391		if (ord->flags & DMA_CH_NEVER)
1392			return NULL;
1393	}
1394
1395	/* second, search the channel map for first free */
1396
1397	for (ch = 0; ch < dma_channels; ch++) {
1398		if (!is_channel_valid(ch_map->channels[ch]))
1399			continue;
1400
1401		if (s3c2410_chans[ch].in_use == 0) {
1402			printk("mapped channel %d to %d\n", channel, ch);
1403			break;
1404		}
1405	}
1406
1407	if (ch >= dma_channels)
1408		return NULL;
1409
1410	/* update our channel mapping */
1411
1412 found:
1413	dmach = &s3c2410_chans[ch];
1414	dmach->map = ch_map;
1415	dmach->req_ch = channel;
1416	s3c_dma_chan_map[channel] = dmach;
1417
1418	/* select the channel */
1419
1420	(dma_sel.select)(dmach, ch_map);
1421
1422	return dmach;
1423}
1424
/* per-entry validation hook for the dma map; currently a placeholder
 * that accepts every entry. */
static int s3c24xx_dma_check_entry(struct s3c24xx_dma_map *map, int ch)
{
	return 0;
}
1429
1430int __init s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel)
1431{
1432	struct s3c24xx_dma_map *nmap;
1433	size_t map_sz = sizeof(*nmap) * sel->map_size;
1434	int ptr;
1435
1436	nmap = kmemdup(sel->map, map_sz, GFP_KERNEL);
1437	if (nmap == NULL)
1438		return -ENOMEM;
1439
1440	memcpy(&dma_sel, sel, sizeof(*sel));
1441
1442	dma_sel.map = nmap;
1443
1444	for (ptr = 0; ptr < sel->map_size; ptr++)
1445		s3c24xx_dma_check_entry(nmap+ptr, ptr);
1446
1447	return 0;
1448}
1449
1450int __init s3c24xx_dma_order_set(struct s3c24xx_dma_order *ord)
1451{
1452	struct s3c24xx_dma_order *nord = dma_order;
1453
1454	if (nord == NULL)
1455		nord = kmalloc(sizeof(struct s3c24xx_dma_order), GFP_KERNEL);
1456
1457	if (nord == NULL) {
1458		printk(KERN_ERR "no memory to store dma channel order\n");
1459		return -ENOMEM;
1460	}
1461
1462	dma_order = nord;
1463	memcpy(nord, ord, sizeof(struct s3c24xx_dma_order));
1464	return 0;
1465}