   1/*
   2 * offload engine driver for the Intel Xscale series of i/o processors
   3 * Copyright © 2006, Intel Corporation.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc.,
  16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17 *
  18 */
  19
  20/*
   21 * This driver supports the asynchronous DMA copy and RAID engines available
  22 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
  23 */
  24
  25#include <linux/init.h>
  26#include <linux/module.h>
  27#include <linux/delay.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/spinlock.h>
  30#include <linux/interrupt.h>
  31#include <linux/platform_device.h>
  32#include <linux/memory.h>
  33#include <linux/ioport.h>
  34#include <linux/raid/pq.h>
  35#include <linux/slab.h>
  36
  37#include <mach/adma.h>
  38
  39#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
  40#define to_iop_adma_device(dev) \
  41	container_of(dev, struct iop_adma_device, common)
  42#define tx_to_iop_adma_slot(tx) \
  43	container_of(tx, struct iop_adma_desc_slot, async_tx)
  44
  45/**
  46 * iop_adma_free_slots - flags descriptor slots for reuse
  47 * @slot: Slot to free
  48 * Caller must hold &iop_chan->lock while calling this function
  49 */
  50static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
  51{
  52	int stride = slot->slots_per_op;
  53
  54	while (stride--) {
  55		slot->slots_per_op = 0;
  56		slot = list_entry(slot->slot_node.next,
  57				struct iop_adma_desc_slot,
  58				slot_node);
  59	}
  60}
  61
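/* iop_desc_unmap - drop the dma mappings of a completed copy/xor
 * descriptor: the destination is unmapped bidirectionally for xor
 * (multiple sources) and each source is unmapped unless it aliases
 * the destination
 */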
  62static void
  63iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
  64{
  65	struct dma_async_tx_descriptor *tx = &desc->async_tx;
  66	struct iop_adma_desc_slot *unmap = desc->group_head;
  67	struct device *dev = &iop_chan->device->pdev->dev;
  68	u32 len = unmap->unmap_len;
  69	enum dma_ctrl_flags flags = tx->flags;
  70	u32 src_cnt;
  71	dma_addr_t addr;
  72	dma_addr_t dest;
  73
  74	src_cnt = unmap->unmap_src_cnt;
  75	dest = iop_desc_get_dest_addr(unmap, iop_chan);
  76	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
  77		enum dma_data_direction dir;
  78
  79		if (src_cnt > 1) /* is xor? */
  80			dir = DMA_BIDIRECTIONAL;
  81		else
  82			dir = DMA_FROM_DEVICE;
  83
  84		dma_unmap_page(dev, dest, len, dir);
  85	}
  86
  87	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
  88		while (src_cnt--) {
  89			addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
  90			if (addr == dest)
  91				continue;
  92			dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
  93		}
  94	}
  95	desc->group_head = NULL;
  96}
  97
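/* iop_desc_unmap_pq - drop the dma mappings of a completed pq
 * descriptor; for pq-validate operations (pq_check_result set) the
 * p and q buffers were mapped as sources and are unmapped as such
 */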
  98static void
  99iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
 100{
 101	struct dma_async_tx_descriptor *tx = &desc->async_tx;
 102	struct iop_adma_desc_slot *unmap = desc->group_head;
 103	struct device *dev = &iop_chan->device->pdev->dev;
 104	u32 len = unmap->unmap_len;
 105	enum dma_ctrl_flags flags = tx->flags;
 106	u32 src_cnt = unmap->unmap_src_cnt;
 107	dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
 108	dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
 109	int i;
 110
 111	if (tx->flags & DMA_PREP_CONTINUE)
 112		src_cnt -= 3;
 113
 114	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
 115		dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
 116		dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
 117	}
 118
 119	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 120		dma_addr_t addr;
 121
 122		for (i = 0; i < src_cnt; i++) {
 123			addr = iop_desc_get_src_addr(unmap, iop_chan, i);
 124			dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
 125		}
 126		if (desc->pq_check_result) {
 127			dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
 128			dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
 129		}
 130	}
 131
 132	desc->group_head = NULL;
 133}
 134
 135
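/* iop_adma_run_tx_complete_actions - for a completed descriptor: record
 * the cookie, invoke the client callback, drop the dma mappings and
 * kick off any dependent operations
 */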
 136static dma_cookie_t
 137iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 138	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
 139{
 140	struct dma_async_tx_descriptor *tx = &desc->async_tx;
 141
 142	BUG_ON(tx->cookie < 0);
 143	if (tx->cookie > 0) {
 144		cookie = tx->cookie;
 145		tx->cookie = 0;
 146
 147		/* call the callback (must not sleep or submit new
 148		 * operations to this channel)
 149		 */
 150		if (tx->callback)
 151			tx->callback(tx->callback_param);
 152
 153		/* unmap dma addresses
 154		 * (unmap_single vs unmap_page?)
 155		 */
 156		if (desc->group_head && desc->unmap_len) {
 157			if (iop_desc_is_pq(desc))
 158				iop_desc_unmap_pq(iop_chan, desc);
 159			else
 160				iop_desc_unmap(iop_chan, desc);
 161		}
 162	}
 163
 164	/* run dependent operations */
 165	dma_run_dependencies(tx);
 166
 167	return cookie;
 168}
 169
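/* iop_adma_clean_slot - recycle an acked descriptor's slots; returns
 * non-zero when the tail of the chain is reached (the last descriptor
 * is kept so new operations can be appended to it)
 */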
 170static int
 171iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
 172	struct iop_adma_chan *iop_chan)
 173{
 174	/* the client is allowed to attach dependent operations
 175	 * until 'ack' is set
 176	 */
 177	if (!async_tx_test_ack(&desc->async_tx))
 178		return 0;
 179
 180	/* leave the last descriptor in the chain
 181	 * so we can append to it
 182	 */
 183	if (desc->chain_node.next == &iop_chan->chain)
 184		return 1;
 185
 186	dev_dbg(iop_chan->device->common.dev,
 187		"\tfree slot: %d slots_per_op: %d\n",
 188		desc->idx, desc->slots_per_op);
 189
 190	list_del(&desc->chain_node);
 191	iop_adma_free_slots(desc);
 192
 193	return 0;
 194}
 195
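/* __iop_adma_slot_cleanup - walk the chain from the oldest descriptor,
 * collect zero-sum results, run completion actions and free slots up to
 * (but not past) the descriptor the hardware is currently working on.
 * Caller must hold iop_chan->lock.
 */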
 196static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 197{
 198	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
 199	dma_cookie_t cookie = 0;
 200	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
 201	int busy = iop_chan_is_busy(iop_chan);
 202	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
 203
 204	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 205	/* free completed slots from the chain starting with
 206	 * the oldest descriptor
 207	 */
 208	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
 209					chain_node) {
 210		pr_debug("\tcookie: %d slot: %d busy: %d "
 211			"this_desc: %#x next_desc: %#x ack: %d\n",
 212			iter->async_tx.cookie, iter->idx, busy,
 213			iter->async_tx.phys, iop_desc_get_next_desc(iter),
 214			async_tx_test_ack(&iter->async_tx));
 215		prefetch(_iter);
 216		prefetch(&_iter->async_tx);
 217
 218		/* do not advance past the current descriptor loaded into the
  219		 * hardware channel; subsequent descriptors are either in
 220		 * process or have not been submitted
 221		 */
 222		if (seen_current)
 223			break;
 224
 225		/* stop the search if we reach the current descriptor and the
 226		 * channel is busy, or if it appears that the current descriptor
 227		 * needs to be re-read (i.e. has been appended to)
 228		 */
 229		if (iter->async_tx.phys == current_desc) {
 230			BUG_ON(seen_current++);
 231			if (busy || iop_desc_get_next_desc(iter))
 232				break;
 233		}
 234
 235		/* detect the start of a group transaction */
 236		if (!slot_cnt && !slots_per_op) {
 237			slot_cnt = iter->slot_cnt;
 238			slots_per_op = iter->slots_per_op;
 239			if (slot_cnt <= slots_per_op) {
 240				slot_cnt = 0;
 241				slots_per_op = 0;
 242			}
 243		}
 244
 245		if (slot_cnt) {
 246			pr_debug("\tgroup++\n");
 247			if (!grp_start)
 248				grp_start = iter;
 249			slot_cnt -= slots_per_op;
 250		}
 251
 252		/* all the members of a group are complete */
 253		if (slots_per_op != 0 && slot_cnt == 0) {
 254			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
 255			int end_of_chain = 0;
 256			pr_debug("\tgroup end\n");
 257
 258			/* collect the total results */
 259			if (grp_start->xor_check_result) {
 260				u32 zero_sum_result = 0;
 261				slot_cnt = grp_start->slot_cnt;
 262				grp_iter = grp_start;
 263
 264				list_for_each_entry_from(grp_iter,
 265					&iop_chan->chain, chain_node) {
 266					zero_sum_result |=
 267					    iop_desc_get_zero_result(grp_iter);
  268					pr_debug("\titer%d result: %d\n",
  269						 grp_iter->idx, zero_sum_result);
 270					slot_cnt -= slots_per_op;
 271					if (slot_cnt == 0)
 272						break;
 273				}
 274				pr_debug("\tgrp_start->xor_check_result: %p\n",
 275					grp_start->xor_check_result);
 276				*grp_start->xor_check_result = zero_sum_result;
 277			}
 278
 279			/* clean up the group */
 280			slot_cnt = grp_start->slot_cnt;
 281			grp_iter = grp_start;
 282			list_for_each_entry_safe_from(grp_iter, _grp_iter,
 283				&iop_chan->chain, chain_node) {
 284				cookie = iop_adma_run_tx_complete_actions(
 285					grp_iter, iop_chan, cookie);
 286
 287				slot_cnt -= slots_per_op;
 288				end_of_chain = iop_adma_clean_slot(grp_iter,
 289					iop_chan);
 290
 291				if (slot_cnt == 0 || end_of_chain)
 292					break;
 293			}
 294
 295			/* the group should be complete at this point */
 296			BUG_ON(slot_cnt);
 297
 298			slots_per_op = 0;
 299			grp_start = NULL;
 300			if (end_of_chain)
 301				break;
 302			else
 303				continue;
 304		} else if (slots_per_op) /* wait for group completion */
 305			continue;
 306
 307		/* write back zero sum results (single descriptor case) */
 308		if (iter->xor_check_result && iter->async_tx.cookie)
 309			*iter->xor_check_result =
 310				iop_desc_get_zero_result(iter);
 311
 312		cookie = iop_adma_run_tx_complete_actions(
 313					iter, iop_chan, cookie);
 314
 315		if (iop_adma_clean_slot(iter, iop_chan))
 316			break;
 317	}
 318
 319	if (cookie > 0) {
 320		iop_chan->completed_cookie = cookie;
 321		pr_debug("\tcompleted cookie %d\n", cookie);
 322	}
 323}
 324
 325static void
 326iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 327{
 328	spin_lock_bh(&iop_chan->lock);
 329	__iop_adma_slot_cleanup(iop_chan);
 330	spin_unlock_bh(&iop_chan->lock);
 331}
 332
 333static void iop_adma_tasklet(unsigned long data)
 334{
 335	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
 336
  337	/* lockdep will flag dependency submissions as potentially
  338	 * recursive locking; this is not the case, as a dependency
  339	 * submission will never recurse into a channel's submit routine.
 340	 * There are checks in async_tx.c to prevent this.
 341	 */
 342	spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
 343	__iop_adma_slot_cleanup(iop_chan);
 344	spin_unlock(&iop_chan->lock);
 345}
 346
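/* iop_adma_alloc_slots - find a contiguous, aligned run of free
 * descriptor slots large enough for one operation; performs direct
 * reclaim and returns NULL if the search fails
 */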
 347static struct iop_adma_desc_slot *
 348iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
 349			int slots_per_op)
 350{
 351	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
 352	LIST_HEAD(chain);
 353	int slots_found, retry = 0;
 354
  355	/* start the search from the last allocated descriptor;
  356	 * if a contiguous allocation cannot be found, start searching
  357	 * from the beginning of the list
 358	 */
 359retry:
 360	slots_found = 0;
 361	if (retry == 0)
 362		iter = iop_chan->last_used;
 363	else
 364		iter = list_entry(&iop_chan->all_slots,
 365			struct iop_adma_desc_slot,
 366			slot_node);
 367
 368	list_for_each_entry_safe_continue(
 369		iter, _iter, &iop_chan->all_slots, slot_node) {
 370		prefetch(_iter);
 371		prefetch(&_iter->async_tx);
 372		if (iter->slots_per_op) {
 373			/* give up after finding the first busy slot
 374			 * on the second pass through the list
 375			 */
 376			if (retry)
 377				break;
 378
 379			slots_found = 0;
 380			continue;
 381		}
 382
 383		/* start the allocation if the slot is correctly aligned */
 384		if (!slots_found++) {
 385			if (iop_desc_is_aligned(iter, slots_per_op))
 386				alloc_start = iter;
 387			else {
 388				slots_found = 0;
 389				continue;
 390			}
 391		}
 392
 393		if (slots_found == num_slots) {
 394			struct iop_adma_desc_slot *alloc_tail = NULL;
 395			struct iop_adma_desc_slot *last_used = NULL;
 396			iter = alloc_start;
 397			while (num_slots) {
 398				int i;
 399				dev_dbg(iop_chan->device->common.dev,
 400					"allocated slot: %d "
 401					"(desc %p phys: %#x) slots_per_op %d\n",
 402					iter->idx, iter->hw_desc,
 403					iter->async_tx.phys, slots_per_op);
 404
 405				/* pre-ack all but the last descriptor */
 406				if (num_slots != slots_per_op)
 407					async_tx_ack(&iter->async_tx);
 408
 409				list_add_tail(&iter->chain_node, &chain);
 410				alloc_tail = iter;
 411				iter->async_tx.cookie = 0;
 412				iter->slot_cnt = num_slots;
 413				iter->xor_check_result = NULL;
 414				for (i = 0; i < slots_per_op; i++) {
 415					iter->slots_per_op = slots_per_op - i;
 416					last_used = iter;
 417					iter = list_entry(iter->slot_node.next,
 418						struct iop_adma_desc_slot,
 419						slot_node);
 420				}
 421				num_slots -= slots_per_op;
 422			}
 423			alloc_tail->group_head = alloc_start;
 424			alloc_tail->async_tx.cookie = -EBUSY;
 425			list_splice(&chain, &alloc_tail->tx_list);
 426			iop_chan->last_used = last_used;
 427			iop_desc_clear_next_desc(alloc_start);
 428			iop_desc_clear_next_desc(alloc_tail);
 429			return alloc_tail;
 430		}
 431	}
 432	if (!retry++)
 433		goto retry;
 434
 435	/* perform direct reclaim if the allocation fails */
 436	__iop_adma_slot_cleanup(iop_chan);
 437
 438	return NULL;
 439}
 440
 441static dma_cookie_t
 442iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
 443	struct iop_adma_desc_slot *desc)
 444{
 445	dma_cookie_t cookie = iop_chan->common.cookie;
 446	cookie++;
 447	if (cookie < 0)
 448		cookie = 1;
 449	iop_chan->common.cookie = desc->async_tx.cookie = cookie;
 450	return cookie;
 451}
 452
 453static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
 454{
 455	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
 456		iop_chan->pending);
 457
 458	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
 459		iop_chan->pending = 0;
 460		iop_chan_append(iop_chan);
 461	}
 462}
 463
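/* iop_adma_tx_submit - assign a cookie, splice the software descriptor
 * onto the channel chain, link it into the hardware descriptor chain and
 * append to the engine once the pending threshold is crossed
 */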
 464static dma_cookie_t
 465iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 466{
 467	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
 468	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
 469	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
 470	int slot_cnt;
 471	int slots_per_op;
 472	dma_cookie_t cookie;
 473	dma_addr_t next_dma;
 474
 475	grp_start = sw_desc->group_head;
 476	slot_cnt = grp_start->slot_cnt;
 477	slots_per_op = grp_start->slots_per_op;
 478
 479	spin_lock_bh(&iop_chan->lock);
 480	cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
 481
 482	old_chain_tail = list_entry(iop_chan->chain.prev,
 483		struct iop_adma_desc_slot, chain_node);
 484	list_splice_init(&sw_desc->tx_list,
 485			 &old_chain_tail->chain_node);
 486
 487	/* fix up the hardware chain */
 488	next_dma = grp_start->async_tx.phys;
 489	iop_desc_set_next_desc(old_chain_tail, next_dma);
 490	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
 491
 492	/* check for pre-chained descriptors */
 493	iop_paranoia(iop_desc_get_next_desc(sw_desc));
 494
  495	/* increment the pending count by the number of slots;
  496	 * memcpy operations have a 1:1 (slot:operation) relation,
  497	 * other operations are heavier and will pop the threshold
  498	 * more often.
 499	 */
 500	iop_chan->pending += slot_cnt;
 501	iop_adma_check_threshold(iop_chan);
 502	spin_unlock_bh(&iop_chan->lock);
 503
 504	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
 505		__func__, sw_desc->async_tx.cookie, sw_desc->idx);
 506
 507	return cookie;
 508}
 509
 510static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 511static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 512
  513/**
  514 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
  515 * @chan: allocate descriptor resources for this channel
  516 * @client: current client requesting the channel be ready for requests
  517 *
  518 * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
  519 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
  520 * greater than 2x the number of slots needed to satisfy a device->max_xor
  521 * request.
  522 */
 523static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 524{
 525	char *hw_desc;
 526	int idx;
 527	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 528	struct iop_adma_desc_slot *slot = NULL;
 529	int init = iop_chan->slots_allocated ? 0 : 1;
 530	struct iop_adma_platform_data *plat_data =
 531		iop_chan->device->pdev->dev.platform_data;
 532	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
 533
 534	/* Allocate descriptor slots */
 535	do {
 536		idx = iop_chan->slots_allocated;
 537		if (idx == num_descs_in_pool)
 538			break;
 539
 540		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 541		if (!slot) {
 542			printk(KERN_INFO "IOP ADMA Channel only initialized"
  543				" %d descriptor slots\n", idx);
 544			break;
 545		}
 546		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
 547		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
 548
 549		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 550		slot->async_tx.tx_submit = iop_adma_tx_submit;
 551		INIT_LIST_HEAD(&slot->tx_list);
 552		INIT_LIST_HEAD(&slot->chain_node);
 553		INIT_LIST_HEAD(&slot->slot_node);
 554		hw_desc = (char *) iop_chan->device->dma_desc_pool;
 555		slot->async_tx.phys =
 556			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
 557		slot->idx = idx;
 558
 559		spin_lock_bh(&iop_chan->lock);
 560		iop_chan->slots_allocated++;
 561		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
 562		spin_unlock_bh(&iop_chan->lock);
 563	} while (iop_chan->slots_allocated < num_descs_in_pool);
 564
 565	if (idx && !iop_chan->last_used)
 566		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
 567					struct iop_adma_desc_slot,
 568					slot_node);
 569
 570	dev_dbg(iop_chan->device->common.dev,
 571		"allocated %d descriptor slots last_used: %p\n",
 572		iop_chan->slots_allocated, iop_chan->last_used);
 573
 574	/* initialize the channel and the chain with a null operation */
 575	if (init) {
 576		if (dma_has_cap(DMA_MEMCPY,
 577			iop_chan->device->common.cap_mask))
 578			iop_chan_start_null_memcpy(iop_chan);
 579		else if (dma_has_cap(DMA_XOR,
 580			iop_chan->device->common.cap_mask))
 581			iop_chan_start_null_xor(iop_chan);
 582		else
 583			BUG();
 584	}
 585
 586	return (idx > 0) ? idx : -ENOMEM;
 587}
 588
 589static struct dma_async_tx_descriptor *
 590iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 591{
 592	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 593	struct iop_adma_desc_slot *sw_desc, *grp_start;
 594	int slot_cnt, slots_per_op;
 595
 596	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 597
 598	spin_lock_bh(&iop_chan->lock);
 599	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
 600	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 601	if (sw_desc) {
 602		grp_start = sw_desc->group_head;
 603		iop_desc_init_interrupt(grp_start, iop_chan);
 604		grp_start->unmap_len = 0;
 605		sw_desc->async_tx.flags = flags;
 606	}
 607	spin_unlock_bh(&iop_chan->lock);
 608
 609	return sw_desc ? &sw_desc->async_tx : NULL;
 610}
 611
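/* iop_adma_prep_dma_memcpy - prepare a descriptor chain for a single
 * memory copy of up to IOP_ADMA_MAX_BYTE_COUNT bytes
 */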
 612static struct dma_async_tx_descriptor *
 613iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 614			 dma_addr_t dma_src, size_t len, unsigned long flags)
 615{
 616	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 617	struct iop_adma_desc_slot *sw_desc, *grp_start;
 618	int slot_cnt, slots_per_op;
 619
 620	if (unlikely(!len))
 621		return NULL;
 622	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 623
 624	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 625		__func__, len);
 626
 627	spin_lock_bh(&iop_chan->lock);
 628	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
 629	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 630	if (sw_desc) {
 631		grp_start = sw_desc->group_head;
 632		iop_desc_init_memcpy(grp_start, flags);
 633		iop_desc_set_byte_count(grp_start, iop_chan, len);
 634		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 635		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 636		sw_desc->unmap_src_cnt = 1;
 637		sw_desc->unmap_len = len;
 638		sw_desc->async_tx.flags = flags;
 639	}
 640	spin_unlock_bh(&iop_chan->lock);
 641
 642	return sw_desc ? &sw_desc->async_tx : NULL;
 643}
 644
 645static struct dma_async_tx_descriptor *
 646iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 647			 int value, size_t len, unsigned long flags)
 648{
 649	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 650	struct iop_adma_desc_slot *sw_desc, *grp_start;
 651	int slot_cnt, slots_per_op;
 652
 653	if (unlikely(!len))
 654		return NULL;
 655	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 656
 657	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 658		__func__, len);
 659
 660	spin_lock_bh(&iop_chan->lock);
 661	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
 662	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 663	if (sw_desc) {
 664		grp_start = sw_desc->group_head;
 665		iop_desc_init_memset(grp_start, flags);
 666		iop_desc_set_byte_count(grp_start, iop_chan, len);
 667		iop_desc_set_block_fill_val(grp_start, value);
 668		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 669		sw_desc->unmap_src_cnt = 1;
 670		sw_desc->unmap_len = len;
 671		sw_desc->async_tx.flags = flags;
 672	}
 673	spin_unlock_bh(&iop_chan->lock);
 674
 675	return sw_desc ? &sw_desc->async_tx : NULL;
 676}
 677
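/* iop_adma_prep_dma_xor - prepare a descriptor chain that xors src_cnt
 * sources of 'len' bytes into dma_dest
 */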
 678static struct dma_async_tx_descriptor *
 679iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 680		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
 681		      unsigned long flags)
 682{
 683	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 684	struct iop_adma_desc_slot *sw_desc, *grp_start;
 685	int slot_cnt, slots_per_op;
 686
 687	if (unlikely(!len))
 688		return NULL;
 689	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 690
 691	dev_dbg(iop_chan->device->common.dev,
 692		"%s src_cnt: %d len: %u flags: %lx\n",
 693		__func__, src_cnt, len, flags);
 694
 695	spin_lock_bh(&iop_chan->lock);
 696	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
 697	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 698	if (sw_desc) {
 699		grp_start = sw_desc->group_head;
 700		iop_desc_init_xor(grp_start, src_cnt, flags);
 701		iop_desc_set_byte_count(grp_start, iop_chan, len);
 702		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 703		sw_desc->unmap_src_cnt = src_cnt;
 704		sw_desc->unmap_len = len;
 705		sw_desc->async_tx.flags = flags;
 706		while (src_cnt--)
 707			iop_desc_set_xor_src_addr(grp_start, src_cnt,
 708						  dma_src[src_cnt]);
 709	}
 710	spin_unlock_bh(&iop_chan->lock);
 711
 712	return sw_desc ? &sw_desc->async_tx : NULL;
 713}
 714
 715static struct dma_async_tx_descriptor *
 716iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
 717			  unsigned int src_cnt, size_t len, u32 *result,
 718			  unsigned long flags)
 719{
 720	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 721	struct iop_adma_desc_slot *sw_desc, *grp_start;
 722	int slot_cnt, slots_per_op;
 723
 724	if (unlikely(!len))
 725		return NULL;
 726
 727	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
 728		__func__, src_cnt, len);
 729
 730	spin_lock_bh(&iop_chan->lock);
 731	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
 732	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 733	if (sw_desc) {
 734		grp_start = sw_desc->group_head;
 735		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
 736		iop_desc_set_zero_sum_byte_count(grp_start, len);
 737		grp_start->xor_check_result = result;
 738		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
 739			__func__, grp_start->xor_check_result);
 740		sw_desc->unmap_src_cnt = src_cnt;
 741		sw_desc->unmap_len = len;
 742		sw_desc->async_tx.flags = flags;
 743		while (src_cnt--)
 744			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
 745						       dma_src[src_cnt]);
 746	}
 747	spin_unlock_bh(&iop_chan->lock);
 748
 749	return sw_desc ? &sw_desc->async_tx : NULL;
 750}
 751
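/* iop_adma_prep_dma_pq - prepare a raid6 p+q generation operation; when
 * continuing a previous operation the old p and q values are fed back in
 * as extra sources (see the comment for dma_maxpq in
 * include/linux/dmaengine.h)
 */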
 752static struct dma_async_tx_descriptor *
 753iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 754		     unsigned int src_cnt, const unsigned char *scf, size_t len,
 755		     unsigned long flags)
 756{
 757	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 758	struct iop_adma_desc_slot *sw_desc, *g;
 759	int slot_cnt, slots_per_op;
 760	int continue_srcs;
 761
 762	if (unlikely(!len))
 763		return NULL;
 764	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 765
 766	dev_dbg(iop_chan->device->common.dev,
 767		"%s src_cnt: %d len: %u flags: %lx\n",
 768		__func__, src_cnt, len, flags);
 769
 770	if (dmaf_p_disabled_continue(flags))
 771		continue_srcs = 1+src_cnt;
 772	else if (dmaf_continue(flags))
 773		continue_srcs = 3+src_cnt;
 774	else
 775		continue_srcs = 0+src_cnt;
 776
 777	spin_lock_bh(&iop_chan->lock);
 778	slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
 779	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 780	if (sw_desc) {
 781		int i;
 782
 783		g = sw_desc->group_head;
 784		iop_desc_set_byte_count(g, iop_chan, len);
 785
  786		/* even if P is disabled, its destination address (bits
  787		 * [3:0]) must match Q.  It is ok if P points to an
  788		 * invalid address; it won't be written.
 789		 */
 790		if (flags & DMA_PREP_PQ_DISABLE_P)
 791			dst[0] = dst[1] & 0x7;
 792
 793		iop_desc_set_pq_addr(g, dst);
 794		sw_desc->unmap_src_cnt = src_cnt;
 795		sw_desc->unmap_len = len;
 796		sw_desc->async_tx.flags = flags;
 797		for (i = 0; i < src_cnt; i++)
 798			iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
 799
  800		/* if we are continuing a previous operation, factor in
  801		 * the old p and q values; see the comment for dma_maxpq
 802		 * in include/linux/dmaengine.h
 803		 */
 804		if (dmaf_p_disabled_continue(flags))
 805			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
 806		else if (dmaf_continue(flags)) {
 807			iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
 808			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
 809			iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
 810		}
 811		iop_desc_init_pq(g, i, flags);
 812	}
 813	spin_unlock_bh(&iop_chan->lock);
 814
 815	return sw_desc ? &sw_desc->async_tx : NULL;
 816}
 817
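/* iop_adma_prep_dma_pq_val - prepare a p+q validate operation; p and q
 * are appended to the source list and the check result is written to
 * *pqres on completion
 */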
 818static struct dma_async_tx_descriptor *
 819iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 820			 unsigned int src_cnt, const unsigned char *scf,
 821			 size_t len, enum sum_check_flags *pqres,
 822			 unsigned long flags)
 823{
 824	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 825	struct iop_adma_desc_slot *sw_desc, *g;
 826	int slot_cnt, slots_per_op;
 827
 828	if (unlikely(!len))
 829		return NULL;
 830	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 831
 832	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
 833		__func__, src_cnt, len);
 834
 835	spin_lock_bh(&iop_chan->lock);
 836	slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
 837	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 838	if (sw_desc) {
 839		/* for validate operations p and q are tagged onto the
 840		 * end of the source list
 841		 */
 842		int pq_idx = src_cnt;
 843
 844		g = sw_desc->group_head;
 845		iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
 846		iop_desc_set_pq_zero_sum_byte_count(g, len);
 847		g->pq_check_result = pqres;
 848		pr_debug("\t%s: g->pq_check_result: %p\n",
 849			__func__, g->pq_check_result);
 850		sw_desc->unmap_src_cnt = src_cnt+2;
 851		sw_desc->unmap_len = len;
 852		sw_desc->async_tx.flags = flags;
 853		while (src_cnt--)
 854			iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
 855							  src[src_cnt],
 856							  scf[src_cnt]);
 857		iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
 858	}
 859	spin_unlock_bh(&iop_chan->lock);
 860
 861	return sw_desc ? &sw_desc->async_tx : NULL;
 862}
 863
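/* iop_adma_free_chan_resources - tear down the channel's descriptor pool;
 * complains if any descriptors beyond the permanently retained chain tail
 * are still in use
 */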
 864static void iop_adma_free_chan_resources(struct dma_chan *chan)
 865{
 866	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 867	struct iop_adma_desc_slot *iter, *_iter;
 868	int in_use_descs = 0;
 869
 870	iop_adma_slot_cleanup(iop_chan);
 871
 872	spin_lock_bh(&iop_chan->lock);
 873	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
 874					chain_node) {
 875		in_use_descs++;
 876		list_del(&iter->chain_node);
 877	}
 878	list_for_each_entry_safe_reverse(
 879		iter, _iter, &iop_chan->all_slots, slot_node) {
 880		list_del(&iter->slot_node);
 881		kfree(iter);
 882		iop_chan->slots_allocated--;
 883	}
 884	iop_chan->last_used = NULL;
 885
 886	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
 887		__func__, iop_chan->slots_allocated);
 888	spin_unlock_bh(&iop_chan->lock);
 889
  890	/* one is ok since we left it there on purpose */
 891	if (in_use_descs > 1)
 892		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
 893			in_use_descs - 1);
 894}
 895
 896/**
 897 * iop_adma_status - poll the status of an ADMA transaction
 898 * @chan: ADMA channel handle
 899 * @cookie: ADMA transaction identifier
 900 * @txstate: a holder for the current state of the channel or NULL
 901 */
 902static enum dma_status iop_adma_status(struct dma_chan *chan,
 903					dma_cookie_t cookie,
 904					struct dma_tx_state *txstate)
 905{
 906	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 907	dma_cookie_t last_used;
 908	dma_cookie_t last_complete;
 909	enum dma_status ret;
 910
 911	last_used = chan->cookie;
 912	last_complete = iop_chan->completed_cookie;
 913	dma_set_tx_state(txstate, last_complete, last_used, 0);
 914	ret = dma_async_is_complete(cookie, last_complete, last_used);
 915	if (ret == DMA_SUCCESS)
 916		return ret;
 917
 918	iop_adma_slot_cleanup(iop_chan);
 919
 920	last_used = chan->cookie;
 921	last_complete = iop_chan->completed_cookie;
 922	dma_set_tx_state(txstate, last_complete, last_used, 0);
 923
 924	return dma_async_is_complete(cookie, last_complete, last_used);
 925}
 926
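/* interrupt handlers: end-of-transfer and end-of-chain events schedule
 * the cleanup tasklet; channel errors are decoded and treated as fatal
 */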
 927static irqreturn_t iop_adma_eot_handler(int irq, void *data)
 928{
 929	struct iop_adma_chan *chan = data;
 930
 931	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 932
 933	tasklet_schedule(&chan->irq_tasklet);
 934
 935	iop_adma_device_clear_eot_status(chan);
 936
 937	return IRQ_HANDLED;
 938}
 939
 940static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
 941{
 942	struct iop_adma_chan *chan = data;
 943
 944	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 945
 946	tasklet_schedule(&chan->irq_tasklet);
 947
 948	iop_adma_device_clear_eoc_status(chan);
 949
 950	return IRQ_HANDLED;
 951}
 952
 953static irqreturn_t iop_adma_err_handler(int irq, void *data)
 954{
 955	struct iop_adma_chan *chan = data;
 956	unsigned long status = iop_chan_get_status(chan);
 957
 958	dev_printk(KERN_ERR, chan->device->common.dev,
 959		"error ( %s%s%s%s%s%s%s)\n",
 960		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
 961		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
 962		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
 963		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
 964		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
 965		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
 966		iop_is_err_split_tx(status, chan) ? "split_tx " : "");
 967
 968	iop_adma_device_clear_err_status(chan);
 969
 970	BUG();
 971
 972	return IRQ_HANDLED;
 973}
 974
 975static void iop_adma_issue_pending(struct dma_chan *chan)
 976{
 977	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 978
 979	if (iop_chan->pending) {
 980		iop_chan->pending = 0;
 981		iop_chan_append(iop_chan);
 982	}
 983}
 984
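/* The self-tests below exercise the standard dmaengine client flow:
 * map the buffers, call a prep routine, submit the descriptor, issue
 * pending work, then poll the channel status for completion.
 */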
 985/*
 986 * Perform a transaction to verify the HW works.
 987 */
 988#define IOP_ADMA_TEST_SIZE 2000
 989
 990static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 991{
 992	int i;
 993	void *src, *dest;
 994	dma_addr_t src_dma, dest_dma;
 995	struct dma_chan *dma_chan;
 996	dma_cookie_t cookie;
 997	struct dma_async_tx_descriptor *tx;
 998	int err = 0;
 999	struct iop_adma_chan *iop_chan;
1000
1001	dev_dbg(device->common.dev, "%s\n", __func__);
1002
1003	src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
1004	if (!src)
1005		return -ENOMEM;
1006	dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
1007	if (!dest) {
1008		kfree(src);
1009		return -ENOMEM;
1010	}
1011
1012	/* Fill in src buffer */
1013	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
1014		((u8 *) src)[i] = (u8)i;
1015
1016	/* Start copy, using first DMA channel */
1017	dma_chan = container_of(device->common.channels.next,
1018				struct dma_chan,
1019				device_node);
1020	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1021		err = -ENODEV;
1022		goto out;
1023	}
1024
1025	dest_dma = dma_map_single(dma_chan->device->dev, dest,
1026				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
1027	src_dma = dma_map_single(dma_chan->device->dev, src,
1028				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
1029	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
1030				      IOP_ADMA_TEST_SIZE,
1031				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1032
1033	cookie = iop_adma_tx_submit(tx);
1034	iop_adma_issue_pending(dma_chan);
1035	msleep(1);
1036
1037	if (iop_adma_status(dma_chan, cookie, NULL) !=
1038			DMA_SUCCESS) {
1039		dev_printk(KERN_ERR, dma_chan->device->dev,
1040			"Self-test copy timed out, disabling\n");
1041		err = -ENODEV;
1042		goto free_resources;
1043	}
1044
1045	iop_chan = to_iop_adma_chan(dma_chan);
1046	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
1047		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
1048	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
1049		dev_printk(KERN_ERR, dma_chan->device->dev,
1050			"Self-test copy failed compare, disabling\n");
1051		err = -ENODEV;
1052		goto free_resources;
1053	}
1054
1055free_resources:
1056	iop_adma_free_chan_resources(dma_chan);
1057out:
1058	kfree(src);
1059	kfree(dest);
1060	return err;
1061}
1062
1063#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
1064static int __devinit
1065iop_adma_xor_val_self_test(struct iop_adma_device *device)
1066{
1067	int i, src_idx;
1068	struct page *dest;
1069	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
1070	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
1071	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
1072	dma_addr_t dma_addr, dest_dma;
1073	struct dma_async_tx_descriptor *tx;
1074	struct dma_chan *dma_chan;
1075	dma_cookie_t cookie;
1076	u8 cmp_byte = 0;
1077	u32 cmp_word;
1078	u32 zero_sum_result;
1079	int err = 0;
1080	struct iop_adma_chan *iop_chan;
1081
1082	dev_dbg(device->common.dev, "%s\n", __func__);
1083
1084	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
1085		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1086		if (!xor_srcs[src_idx]) {
1087			while (src_idx--)
1088				__free_page(xor_srcs[src_idx]);
1089			return -ENOMEM;
1090		}
1091	}
1092
1093	dest = alloc_page(GFP_KERNEL);
1094	if (!dest) {
1095		while (src_idx--)
1096			__free_page(xor_srcs[src_idx]);
1097		return -ENOMEM;
1098	}
1099
1100	/* Fill in src buffers */
1101	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
1102		u8 *ptr = page_address(xor_srcs[src_idx]);
1103		for (i = 0; i < PAGE_SIZE; i++)
1104			ptr[i] = (1 << src_idx);
1105	}
1106
1107	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
1108		cmp_byte ^= (u8) (1 << src_idx);
1109
1110	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1111			(cmp_byte << 8) | cmp_byte;
1112
1113	memset(page_address(dest), 0, PAGE_SIZE);
1114
1115	dma_chan = container_of(device->common.channels.next,
1116				struct dma_chan,
1117				device_node);
1118	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1119		err = -ENODEV;
1120		goto out;
1121	}
1122
1123	/* test xor */
1124	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
1125				PAGE_SIZE, DMA_FROM_DEVICE);
1126	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1127		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1128					   0, PAGE_SIZE, DMA_TO_DEVICE);
1129	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1130				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
1131				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1132
1133	cookie = iop_adma_tx_submit(tx);
1134	iop_adma_issue_pending(dma_chan);
1135	msleep(8);
1136
1137	if (iop_adma_status(dma_chan, cookie, NULL) !=
1138		DMA_SUCCESS) {
1139		dev_printk(KERN_ERR, dma_chan->device->dev,
1140			"Self-test xor timed out, disabling\n");
1141		err = -ENODEV;
1142		goto free_resources;
1143	}
1144
1145	iop_chan = to_iop_adma_chan(dma_chan);
1146	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
1147		PAGE_SIZE, DMA_FROM_DEVICE);
1148	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1149		u32 *ptr = page_address(dest);
1150		if (ptr[i] != cmp_word) {
1151			dev_printk(KERN_ERR, dma_chan->device->dev,
1152				"Self-test xor failed compare, disabling\n");
1153			err = -ENODEV;
1154			goto free_resources;
1155		}
1156	}
1157	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
1158		PAGE_SIZE, DMA_TO_DEVICE);
1159
1160	/* skip zero sum if the capability is not present */
1161	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1162		goto free_resources;
1163
 1164	/* zero sum the sources with the destination page */
1165	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1166		zero_sum_srcs[i] = xor_srcs[i];
1167	zero_sum_srcs[i] = dest;
1168
1169	zero_sum_result = 1;
1170
1171	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1172		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1173					   zero_sum_srcs[i], 0, PAGE_SIZE,
1174					   DMA_TO_DEVICE);
1175	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1176				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1177				       &zero_sum_result,
1178				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1179
1180	cookie = iop_adma_tx_submit(tx);
1181	iop_adma_issue_pending(dma_chan);
1182	msleep(8);
1183
1184	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1185		dev_printk(KERN_ERR, dma_chan->device->dev,
1186			"Self-test zero sum timed out, disabling\n");
1187		err = -ENODEV;
1188		goto free_resources;
1189	}
1190
1191	if (zero_sum_result != 0) {
1192		dev_printk(KERN_ERR, dma_chan->device->dev,
1193			"Self-test zero sum failed compare, disabling\n");
1194		err = -ENODEV;
1195		goto free_resources;
1196	}
1197
1198	/* test memset */
1199	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
1200			PAGE_SIZE, DMA_FROM_DEVICE);
1201	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
1202				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1203
1204	cookie = iop_adma_tx_submit(tx);
1205	iop_adma_issue_pending(dma_chan);
1206	msleep(8);
1207
1208	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1209		dev_printk(KERN_ERR, dma_chan->device->dev,
1210			"Self-test memset timed out, disabling\n");
1211		err = -ENODEV;
1212		goto free_resources;
1213	}
1214
1215	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
1216		u32 *ptr = page_address(dest);
1217		if (ptr[i]) {
1218			dev_printk(KERN_ERR, dma_chan->device->dev,
1219				"Self-test memset failed compare, disabling\n");
1220			err = -ENODEV;
1221			goto free_resources;
1222		}
1223	}
1224
1225	/* test for non-zero parity sum */
1226	zero_sum_result = 0;
1227	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1228		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1229					   zero_sum_srcs[i], 0, PAGE_SIZE,
1230					   DMA_TO_DEVICE);
1231	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1232				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1233				       &zero_sum_result,
1234				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1235
1236	cookie = iop_adma_tx_submit(tx);
1237	iop_adma_issue_pending(dma_chan);
1238	msleep(8);
1239
1240	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1241		dev_printk(KERN_ERR, dma_chan->device->dev,
1242			"Self-test non-zero sum timed out, disabling\n");
1243		err = -ENODEV;
1244		goto free_resources;
1245	}
1246
1247	if (zero_sum_result != 1) {
1248		dev_printk(KERN_ERR, dma_chan->device->dev,
1249			"Self-test non-zero sum failed compare, disabling\n");
1250		err = -ENODEV;
1251		goto free_resources;
1252	}
1253
1254free_resources:
1255	iop_adma_free_chan_resources(dma_chan);
1256out:
1257	src_idx = IOP_ADMA_NUM_SRC_TEST;
1258	while (src_idx--)
1259		__free_page(xor_srcs[src_idx]);
1260	__free_page(dest);
1261	return err;
1262}
1263
1264#ifdef CONFIG_RAID6_PQ
1265static int __devinit
1266iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1267{
1268	/* combined sources, software pq results, and extra hw pq results */
1269	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
1270	/* ptr to the extra hw pq buffers defined above */
1271	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
1272	/* address conversion buffers (dma_map / page_address) */
1273	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
1274	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
1275	dma_addr_t pq_dest[2];
1276
1277	int i;
1278	struct dma_async_tx_descriptor *tx;
1279	struct dma_chan *dma_chan;
1280	dma_cookie_t cookie;
1281	u32 zero_sum_result;
1282	int err = 0;
1283	struct device *dev;
1284
1285	dev_dbg(device->common.dev, "%s\n", __func__);
1286
1287	for (i = 0; i < ARRAY_SIZE(pq); i++) {
1288		pq[i] = alloc_page(GFP_KERNEL);
1289		if (!pq[i]) {
1290			while (i--)
1291				__free_page(pq[i]);
1292			return -ENOMEM;
1293		}
1294	}
1295
1296	/* Fill in src buffers */
1297	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
1298		pq_sw[i] = page_address(pq[i]);
1299		memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
1300	}
1301	pq_sw[i] = page_address(pq[i]);
1302	pq_sw[i+1] = page_address(pq[i+1]);
1303
1304	dma_chan = container_of(device->common.channels.next,
1305				struct dma_chan,
1306				device_node);
1307	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1308		err = -ENODEV;
1309		goto out;
1310	}
1311
1312	dev = dma_chan->device->dev;
1313
1314	/* initialize the dests */
1315	memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
1316	memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);
1317
1318	/* test pq */
1319	pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1320	pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1321	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1322		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1323					 DMA_TO_DEVICE);
1324
1325	tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
1326				  IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
1327				  PAGE_SIZE,
1328				  DMA_PREP_INTERRUPT |
1329				  DMA_CTRL_ACK);
1330
1331	cookie = iop_adma_tx_submit(tx);
1332	iop_adma_issue_pending(dma_chan);
1333	msleep(8);
1334
1335	if (iop_adma_status(dma_chan, cookie, NULL) !=
1336		DMA_SUCCESS) {
1337		dev_err(dev, "Self-test pq timed out, disabling\n");
1338		err = -ENODEV;
1339		goto free_resources;
1340	}
1341
1342	raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
1343
1344	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
1345		   page_address(pq_hw[0]), PAGE_SIZE) != 0) {
1346		dev_err(dev, "Self-test p failed compare, disabling\n");
1347		err = -ENODEV;
1348		goto free_resources;
1349	}
1350	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
1351		   page_address(pq_hw[1]), PAGE_SIZE) != 0) {
1352		dev_err(dev, "Self-test q failed compare, disabling\n");
1353		err = -ENODEV;
1354		goto free_resources;
1355	}
1356
1357	/* test correct zero sum using the software generated pq values */
1358	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1359		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1360					 DMA_TO_DEVICE);
1361
1362	zero_sum_result = ~0;
1363	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1364				      pq_src, IOP_ADMA_NUM_SRC_TEST,
1365				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1366				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1367
1368	cookie = iop_adma_tx_submit(tx);
1369	iop_adma_issue_pending(dma_chan);
1370	msleep(8);
1371
1372	if (iop_adma_status(dma_chan, cookie, NULL) !=
1373		DMA_SUCCESS) {
1374		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1375		err = -ENODEV;
1376		goto free_resources;
1377	}
1378
1379	if (zero_sum_result != 0) {
1380		dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
1381			zero_sum_result);
1382		err = -ENODEV;
1383		goto free_resources;
1384	}
1385
1386	/* test incorrect zero sum */
1387	i = IOP_ADMA_NUM_SRC_TEST;
1388	memset(pq_sw[i] + 100, 0, 100);
1389	memset(pq_sw[i+1] + 200, 0, 200);
1390	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1391		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1392					 DMA_TO_DEVICE);
1393
1394	zero_sum_result = 0;
1395	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1396				      pq_src, IOP_ADMA_NUM_SRC_TEST,
1397				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1398				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1399
1400	cookie = iop_adma_tx_submit(tx);
1401	iop_adma_issue_pending(dma_chan);
1402	msleep(8);
1403
1404	if (iop_adma_status(dma_chan, cookie, NULL) !=
1405		DMA_SUCCESS) {
1406		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1407		err = -ENODEV;
1408		goto free_resources;
1409	}
1410
1411	if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
1412		dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
1413			zero_sum_result);
1414		err = -ENODEV;
1415		goto free_resources;
1416	}
1417
1418free_resources:
1419	iop_adma_free_chan_resources(dma_chan);
1420out:
1421	i = ARRAY_SIZE(pq);
1422	while (i--)
1423		__free_page(pq[i]);
1424	return err;
1425}
1426#endif
1427
1428static int __devexit iop_adma_remove(struct platform_device *dev)
1429{
1430	struct iop_adma_device *device = platform_get_drvdata(dev);
1431	struct dma_chan *chan, *_chan;
1432	struct iop_adma_chan *iop_chan;
1433	struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
1434
1435	dma_async_device_unregister(&device->common);
1436
1437	dma_free_coherent(&dev->dev, plat_data->pool_size,
1438			device->dma_desc_pool_virt, device->dma_desc_pool);
1439
1440	list_for_each_entry_safe(chan, _chan, &device->common.channels,
1441				device_node) {
1442		iop_chan = to_iop_adma_chan(chan);
1443		list_del(&chan->device_node);
1444		kfree(iop_chan);
1445	}
1446	kfree(device);
1447
1448	return 0;
1449}
1450
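/* iop_adma_probe - map the engine registers, allocate the descriptor
 * pool, install the interrupt handlers, run self-tests for the
 * advertised capabilities and register the dmaengine device
 */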
1451static int __devinit iop_adma_probe(struct platform_device *pdev)
1452{
1453	struct resource *res;
1454	int ret = 0, i;
1455	struct iop_adma_device *adev;
1456	struct iop_adma_chan *iop_chan;
1457	struct dma_device *dma_dev;
1458	struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
1459
1460	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1461	if (!res)
1462		return -ENODEV;
1463
1464	if (!devm_request_mem_region(&pdev->dev, res->start,
1465				resource_size(res), pdev->name))
1466		return -EBUSY;
1467
1468	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1469	if (!adev)
1470		return -ENOMEM;
1471	dma_dev = &adev->common;
1472
1473	/* allocate coherent memory for hardware descriptors
1474	 * note: writecombine gives slightly better performance, but
1475	 * requires that we explicitly flush the writes
1476	 */
1477	if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1478					plat_data->pool_size,
1479					&adev->dma_desc_pool,
1480					GFP_KERNEL)) == NULL) {
1481		ret = -ENOMEM;
1482		goto err_free_adev;
1483	}
1484
 1485	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1486		__func__, adev->dma_desc_pool_virt,
1487		(void *) adev->dma_desc_pool);
1488
1489	adev->id = plat_data->hw_id;
1490
 1491	/* discover transaction capabilities from the platform data */
1492	dma_dev->cap_mask = plat_data->cap_mask;
1493
1494	adev->pdev = pdev;
1495	platform_set_drvdata(pdev, adev);
1496
1497	INIT_LIST_HEAD(&dma_dev->channels);
1498
1499	/* set base routines */
1500	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1501	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1502	dma_dev->device_tx_status = iop_adma_status;
1503	dma_dev->device_issue_pending = iop_adma_issue_pending;
1504	dma_dev->dev = &pdev->dev;
1505
1506	/* set prep routines based on capability */
1507	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1508		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1509	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1510		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
1511	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1512		dma_dev->max_xor = iop_adma_get_max_xor();
1513		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1514	}
1515	if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1516		dma_dev->device_prep_dma_xor_val =
1517			iop_adma_prep_dma_xor_val;
1518	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1519		dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1520		dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1521	}
1522	if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1523		dma_dev->device_prep_dma_pq_val =
1524			iop_adma_prep_dma_pq_val;
1525	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1526		dma_dev->device_prep_dma_interrupt =
1527			iop_adma_prep_dma_interrupt;
1528
1529	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1530	if (!iop_chan) {
1531		ret = -ENOMEM;
1532		goto err_free_dma;
1533	}
1534	iop_chan->device = adev;
1535
1536	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1537					resource_size(res));
1538	if (!iop_chan->mmr_base) {
1539		ret = -ENOMEM;
1540		goto err_free_iop_chan;
1541	}
1542	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1543		iop_chan);
1544
1545	/* clear errors before enabling interrupts */
1546	iop_adma_device_clear_err_status(iop_chan);
1547
1548	for (i = 0; i < 3; i++) {
1549		irq_handler_t handler[] = { iop_adma_eot_handler,
1550					iop_adma_eoc_handler,
1551					iop_adma_err_handler };
1552		int irq = platform_get_irq(pdev, i);
1553		if (irq < 0) {
1554			ret = -ENXIO;
1555			goto err_free_iop_chan;
1556		} else {
1557			ret = devm_request_irq(&pdev->dev, irq,
1558					handler[i], 0, pdev->name, iop_chan);
1559			if (ret)
1560				goto err_free_iop_chan;
1561		}
1562	}
1563
1564	spin_lock_init(&iop_chan->lock);
1565	INIT_LIST_HEAD(&iop_chan->chain);
1566	INIT_LIST_HEAD(&iop_chan->all_slots);
1567	iop_chan->common.device = dma_dev;
1568	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1569
1570	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1571		ret = iop_adma_memcpy_self_test(adev);
1572		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1573		if (ret)
1574			goto err_free_iop_chan;
1575	}
1576
1577	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
1578	    dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
1579		ret = iop_adma_xor_val_self_test(adev);
1580		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1581		if (ret)
1582			goto err_free_iop_chan;
1583	}
1584
1585	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1586	    dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1587		#ifdef CONFIG_RAID6_PQ
1588		ret = iop_adma_pq_zero_sum_self_test(adev);
1589		dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1590		#else
 1591		/* cannot test raid6, so do not publish the capability */
1592		dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1593		dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1594		ret = 0;
1595		#endif
1596		if (ret)
1597			goto err_free_iop_chan;
1598	}
1599
1600	dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
1601	  "( %s%s%s%s%s%s%s)\n",
1602	  dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1603	  dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1604	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1605	  dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1606	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)  ? "fill " : "",
1607	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1608	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1609
1610	dma_async_device_register(dma_dev);
1611	goto out;
1612
1613 err_free_iop_chan:
1614	kfree(iop_chan);
1615 err_free_dma:
1616	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1617			adev->dma_desc_pool_virt, adev->dma_desc_pool);
1618 err_free_adev:
1619	kfree(adev);
1620 out:
1621	return ret;
1622}
1623
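/* iop_chan_start_null_memcpy - seed a freshly initialized channel with a
 * zero-length copy descriptor so later operations have a valid chain
 * head to append to
 */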
1624static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1625{
1626	struct iop_adma_desc_slot *sw_desc, *grp_start;
1627	dma_cookie_t cookie;
1628	int slot_cnt, slots_per_op;
1629
1630	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1631
1632	spin_lock_bh(&iop_chan->lock);
1633	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1634	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1635	if (sw_desc) {
1636		grp_start = sw_desc->group_head;
1637
1638		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1639		async_tx_ack(&sw_desc->async_tx);
1640		iop_desc_init_memcpy(grp_start, 0);
1641		iop_desc_set_byte_count(grp_start, iop_chan, 0);
1642		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1643		iop_desc_set_memcpy_src_addr(grp_start, 0);
1644
1645		cookie = iop_chan->common.cookie;
1646		cookie++;
1647		if (cookie <= 1)
1648			cookie = 2;
1649
1650		/* initialize the completed cookie to be less than
1651		 * the most recently used cookie
1652		 */
1653		iop_chan->completed_cookie = cookie - 1;
1654		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1655
1656		/* channel should not be busy */
1657		BUG_ON(iop_chan_is_busy(iop_chan));
1658
1659		/* clear any prior error-status bits */
1660		iop_adma_device_clear_err_status(iop_chan);
1661
1662		/* disable operation */
1663		iop_chan_disable(iop_chan);
1664
1665		/* set the descriptor address */
1666		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1667
1668		/* 1/ don't add pre-chained descriptors
1669		 * 2/ dummy read to flush next_desc write
1670		 */
1671		BUG_ON(iop_desc_get_next_desc(sw_desc));
1672
1673		/* run the descriptor */
1674		iop_chan_enable(iop_chan);
1675	} else
1676		dev_printk(KERN_ERR, iop_chan->device->common.dev,
1677			 "failed to allocate null descriptor\n");
1678	spin_unlock_bh(&iop_chan->lock);
1679}
1680
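/* iop_chan_start_null_xor - as above, but seeds the chain with a
 * zero-length two-source xor descriptor
 */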
1681static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1682{
1683	struct iop_adma_desc_slot *sw_desc, *grp_start;
1684	dma_cookie_t cookie;
1685	int slot_cnt, slots_per_op;
1686
1687	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1688
1689	spin_lock_bh(&iop_chan->lock);
1690	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1691	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1692	if (sw_desc) {
1693		grp_start = sw_desc->group_head;
1694		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1695		async_tx_ack(&sw_desc->async_tx);
1696		iop_desc_init_null_xor(grp_start, 2, 0);
1697		iop_desc_set_byte_count(grp_start, iop_chan, 0);
1698		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1699		iop_desc_set_xor_src_addr(grp_start, 0, 0);
1700		iop_desc_set_xor_src_addr(grp_start, 1, 0);
1701
1702		cookie = iop_chan->common.cookie;
1703		cookie++;
1704		if (cookie <= 1)
1705			cookie = 2;
1706
1707		/* initialize the completed cookie to be less than
1708		 * the most recently used cookie
1709		 */
1710		iop_chan->completed_cookie = cookie - 1;
1711		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1712
1713		/* channel should not be busy */
1714		BUG_ON(iop_chan_is_busy(iop_chan));
1715
1716		/* clear any prior error-status bits */
1717		iop_adma_device_clear_err_status(iop_chan);
1718
1719		/* disable operation */
1720		iop_chan_disable(iop_chan);
1721
1722		/* set the descriptor address */
1723		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1724
1725		/* 1/ don't add pre-chained descriptors
1726		 * 2/ dummy read to flush next_desc write
1727		 */
1728		BUG_ON(iop_desc_get_next_desc(sw_desc));
1729
1730		/* run the descriptor */
1731		iop_chan_enable(iop_chan);
1732	} else
1733		dev_printk(KERN_ERR, iop_chan->device->common.dev,
1734			"failed to allocate null descriptor\n");
1735	spin_unlock_bh(&iop_chan->lock);
1736}
1737
1738MODULE_ALIAS("platform:iop-adma");
1739
1740static struct platform_driver iop_adma_driver = {
1741	.probe		= iop_adma_probe,
1742	.remove		= __devexit_p(iop_adma_remove),
1743	.driver		= {
1744		.owner	= THIS_MODULE,
1745		.name	= "iop-adma",
1746	},
1747};
1748
 1749static int __init iop_adma_init(void)
1750{
1751	return platform_driver_register(&iop_adma_driver);
1752}
1753
 1754static void __exit iop_adma_exit(void)
1755{
1756	platform_driver_unregister(&iop_adma_driver);
1757	return;
1758}
1759module_exit(iop_adma_exit);
1760module_init(iop_adma_init);
1761
1762MODULE_AUTHOR("Intel Corporation");
1763MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1764MODULE_LICENSE("GPL");