v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * offload engine driver for the Intel Xscale series of i/o processors
   4 * Copyright © 2006, Intel Corporation.
   5 */
   6
   7/*
   8 * This driver supports the asynchronous DMA copy and RAID engines available
   9 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
  10 */
  11
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/delay.h>
  15#include <linux/dma-mapping.h>
  16#include <linux/spinlock.h>
  17#include <linux/interrupt.h>
  18#include <linux/platform_device.h>
  19#include <linux/prefetch.h>
  20#include <linux/memory.h>
  21#include <linux/ioport.h>
  22#include <linux/raid/pq.h>
  23#include <linux/slab.h>
  24
   25#include "iop-adma.h"
   26#include "dmaengine.h"
  27
  28#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
  29#define to_iop_adma_device(dev) \
  30	container_of(dev, struct iop_adma_device, common)
  31#define tx_to_iop_adma_slot(tx) \
  32	container_of(tx, struct iop_adma_desc_slot, async_tx)
  33
  34/**
  35 * iop_adma_free_slots - flags descriptor slots for reuse
  36 * @slot: Slot to free
  37 * Caller must hold &iop_chan->lock while calling this function
  38 */
  39static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
  40{
  41	int stride = slot->slots_per_op;
  42
  43	while (stride--) {
  44		slot->slots_per_op = 0;
  45		slot = list_entry(slot->slot_node.next,
  46				struct iop_adma_desc_slot,
  47				slot_node);
  48	}
  49}
  50
  51static dma_cookie_t
  52iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
  53	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
  54{
  55	struct dma_async_tx_descriptor *tx = &desc->async_tx;
  56
  57	BUG_ON(tx->cookie < 0);
  58	if (tx->cookie > 0) {
  59		cookie = tx->cookie;
  60		tx->cookie = 0;
  61
  62		/* call the callback (must not sleep or submit new
  63		 * operations to this channel)
  64		 */
  65		dmaengine_desc_get_callback_invoke(tx, NULL);
  66
  67		dma_descriptor_unmap(tx);
  68		if (desc->group_head)
  69			desc->group_head = NULL;
  70	}
  71
  72	/* run dependent operations */
  73	dma_run_dependencies(tx);
  74
  75	return cookie;
  76}
  77
  78static int
  79iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
  80	struct iop_adma_chan *iop_chan)
  81{
  82	/* the client is allowed to attach dependent operations
  83	 * until 'ack' is set
  84	 */
  85	if (!async_tx_test_ack(&desc->async_tx))
  86		return 0;
  87
  88	/* leave the last descriptor in the chain
  89	 * so we can append to it
  90	 */
  91	if (desc->chain_node.next == &iop_chan->chain)
  92		return 1;
  93
  94	dev_dbg(iop_chan->device->common.dev,
  95		"\tfree slot: %d slots_per_op: %d\n",
  96		desc->idx, desc->slots_per_op);
  97
  98	list_del(&desc->chain_node);
  99	iop_adma_free_slots(desc);
 100
 101	return 0;
 102}
 103
 104static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 105{
 106	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
 107	dma_cookie_t cookie = 0;
 108	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
 109	int busy = iop_chan_is_busy(iop_chan);
 110	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
 111
 112	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 113	/* free completed slots from the chain starting with
 114	 * the oldest descriptor
 115	 */
 116	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
 117					chain_node) {
 118		pr_debug("\tcookie: %d slot: %d busy: %d "
 119			"this_desc: %pad next_desc: %#llx ack: %d\n",
 120			iter->async_tx.cookie, iter->idx, busy,
 121			&iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
 122			async_tx_test_ack(&iter->async_tx));
 123		prefetch(_iter);
 124		prefetch(&_iter->async_tx);
 125
 126		/* do not advance past the current descriptor loaded into the
 127		 * hardware channel, subsequent descriptors are either in
 128		 * process or have not been submitted
 129		 */
 130		if (seen_current)
 131			break;
 132
 133		/* stop the search if we reach the current descriptor and the
 134		 * channel is busy, or if it appears that the current descriptor
 135		 * needs to be re-read (i.e. has been appended to)
 136		 */
 137		if (iter->async_tx.phys == current_desc) {
 138			BUG_ON(seen_current++);
 139			if (busy || iop_desc_get_next_desc(iter))
 140				break;
 141		}
 142
 143		/* detect the start of a group transaction */
 144		if (!slot_cnt && !slots_per_op) {
 145			slot_cnt = iter->slot_cnt;
 146			slots_per_op = iter->slots_per_op;
 147			if (slot_cnt <= slots_per_op) {
 148				slot_cnt = 0;
 149				slots_per_op = 0;
 150			}
 151		}
 152
 153		if (slot_cnt) {
 154			pr_debug("\tgroup++\n");
 155			if (!grp_start)
 156				grp_start = iter;
 157			slot_cnt -= slots_per_op;
 158		}
 159
 160		/* all the members of a group are complete */
 161		if (slots_per_op != 0 && slot_cnt == 0) {
 162			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
 163			int end_of_chain = 0;
 164			pr_debug("\tgroup end\n");
 165
 166			/* collect the total results */
 167			if (grp_start->xor_check_result) {
 168				u32 zero_sum_result = 0;
 169				slot_cnt = grp_start->slot_cnt;
 170				grp_iter = grp_start;
 171
 172				list_for_each_entry_from(grp_iter,
 173					&iop_chan->chain, chain_node) {
 174					zero_sum_result |=
 175					    iop_desc_get_zero_result(grp_iter);
 176					    pr_debug("\titer%d result: %d\n",
 177					    grp_iter->idx, zero_sum_result);
 178					slot_cnt -= slots_per_op;
 179					if (slot_cnt == 0)
 180						break;
 181				}
 182				pr_debug("\tgrp_start->xor_check_result: %p\n",
 183					grp_start->xor_check_result);
 184				*grp_start->xor_check_result = zero_sum_result;
 185			}
 186
 187			/* clean up the group */
 188			slot_cnt = grp_start->slot_cnt;
 189			grp_iter = grp_start;
 190			list_for_each_entry_safe_from(grp_iter, _grp_iter,
 191				&iop_chan->chain, chain_node) {
 192				cookie = iop_adma_run_tx_complete_actions(
 193					grp_iter, iop_chan, cookie);
 194
 195				slot_cnt -= slots_per_op;
 196				end_of_chain = iop_adma_clean_slot(grp_iter,
 197					iop_chan);
 198
 199				if (slot_cnt == 0 || end_of_chain)
 200					break;
 201			}
 202
 203			/* the group should be complete at this point */
 204			BUG_ON(slot_cnt);
 205
 206			slots_per_op = 0;
 207			grp_start = NULL;
 208			if (end_of_chain)
 209				break;
 210			else
 211				continue;
 212		} else if (slots_per_op) /* wait for group completion */
 213			continue;
 214
 215		/* write back zero sum results (single descriptor case) */
 216		if (iter->xor_check_result && iter->async_tx.cookie)
 217			*iter->xor_check_result =
 218				iop_desc_get_zero_result(iter);
 219
 220		cookie = iop_adma_run_tx_complete_actions(
 221					iter, iop_chan, cookie);
 222
 223		if (iop_adma_clean_slot(iter, iop_chan))
 224			break;
 225	}
 226
 227	if (cookie > 0) {
 228		iop_chan->common.completed_cookie = cookie;
 229		pr_debug("\tcompleted cookie %d\n", cookie);
 230	}
 231}
 232
 233static void
 234iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 235{
 236	spin_lock_bh(&iop_chan->lock);
 237	__iop_adma_slot_cleanup(iop_chan);
 238	spin_unlock_bh(&iop_chan->lock);
 239}
 240
 241static void iop_adma_tasklet(unsigned long data)
 242{
 243	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
 244
 245	/* lockdep will flag dependency submissions as potentially
 246	 * recursive locking; this is not the case, as a dependency
 247	 * submission will never recurse into a channel's submit routine.
 248	 * There are checks in async_tx.c to prevent this.
 249	 */
 250	spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
 251	__iop_adma_slot_cleanup(iop_chan);
 252	spin_unlock(&iop_chan->lock);
 253}
 254
 255static struct iop_adma_desc_slot *
 256iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
 257			int slots_per_op)
 258{
 259	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
 260	LIST_HEAD(chain);
 261	int slots_found, retry = 0;
 262
 263	/* start the search from the last allocated descriptor;
 264	 * if a contiguous allocation cannot be found, start searching
 265	 * from the beginning of the list
 266	 */
 267retry:
 268	slots_found = 0;
 269	if (retry == 0)
 270		iter = iop_chan->last_used;
 271	else
 272		iter = list_entry(&iop_chan->all_slots,
 273			struct iop_adma_desc_slot,
 274			slot_node);
 275
 276	list_for_each_entry_safe_continue(
 277		iter, _iter, &iop_chan->all_slots, slot_node) {
 278		prefetch(_iter);
 279		prefetch(&_iter->async_tx);
 280		if (iter->slots_per_op) {
 281			/* give up after finding the first busy slot
 282			 * on the second pass through the list
 283			 */
 284			if (retry)
 285				break;
 286
 287			slots_found = 0;
 288			continue;
 289		}
 290
 291		/* start the allocation if the slot is correctly aligned */
 292		if (!slots_found++) {
 293			if (iop_desc_is_aligned(iter, slots_per_op))
 294				alloc_start = iter;
 295			else {
 296				slots_found = 0;
 297				continue;
 298			}
 299		}
 300
 301		if (slots_found == num_slots) {
 302			struct iop_adma_desc_slot *alloc_tail = NULL;
 303			struct iop_adma_desc_slot *last_used = NULL;
 304			iter = alloc_start;
 305			while (num_slots) {
 306				int i;
 307				dev_dbg(iop_chan->device->common.dev,
 308					"allocated slot: %d "
 309					"(desc %p phys: %#llx) slots_per_op %d\n",
 310					iter->idx, iter->hw_desc,
 311					(u64)iter->async_tx.phys, slots_per_op);
 312
 313				/* pre-ack all but the last descriptor */
 314				if (num_slots != slots_per_op)
 315					async_tx_ack(&iter->async_tx);
 316
 317				list_add_tail(&iter->chain_node, &chain);
 318				alloc_tail = iter;
 319				iter->async_tx.cookie = 0;
 320				iter->slot_cnt = num_slots;
 321				iter->xor_check_result = NULL;
 322				for (i = 0; i < slots_per_op; i++) {
 323					iter->slots_per_op = slots_per_op - i;
 324					last_used = iter;
 325					iter = list_entry(iter->slot_node.next,
 326						struct iop_adma_desc_slot,
 327						slot_node);
 328				}
 329				num_slots -= slots_per_op;
 330			}
 331			alloc_tail->group_head = alloc_start;
 332			alloc_tail->async_tx.cookie = -EBUSY;
 333			list_splice(&chain, &alloc_tail->tx_list);
 334			iop_chan->last_used = last_used;
 335			iop_desc_clear_next_desc(alloc_start);
 336			iop_desc_clear_next_desc(alloc_tail);
 337			return alloc_tail;
 338		}
 339	}
 340	if (!retry++)
 341		goto retry;
 342
 343	/* perform direct reclaim if the allocation fails */
 344	__iop_adma_slot_cleanup(iop_chan);
 345
 346	return NULL;
 347}
 348
 349static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
 350{
 351	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
 352		iop_chan->pending);
 353
 354	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
 355		iop_chan->pending = 0;
 356		iop_chan_append(iop_chan);
 357	}
 358}
 359
 360static dma_cookie_t
 361iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 362{
 363	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
 364	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
 365	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
  366	int slot_cnt;
  367	dma_cookie_t cookie;
 368	dma_addr_t next_dma;
 369
 370	grp_start = sw_desc->group_head;
  371	slot_cnt = grp_start->slot_cnt;
  372
 373	spin_lock_bh(&iop_chan->lock);
 374	cookie = dma_cookie_assign(tx);
 375
 376	old_chain_tail = list_entry(iop_chan->chain.prev,
 377		struct iop_adma_desc_slot, chain_node);
 378	list_splice_init(&sw_desc->tx_list,
 379			 &old_chain_tail->chain_node);
 380
 381	/* fix up the hardware chain */
 382	next_dma = grp_start->async_tx.phys;
 383	iop_desc_set_next_desc(old_chain_tail, next_dma);
 384	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
 385
 386	/* check for pre-chained descriptors */
 387	iop_paranoia(iop_desc_get_next_desc(sw_desc));
 388
 389	/* increment the pending count by the number of slots
 390	 * memcpy operations have a 1:1 (slot:operation) relation
 391	 * other operations are heavier and will pop the threshold
 392	 * more often.
 393	 */
 394	iop_chan->pending += slot_cnt;
 395	iop_adma_check_threshold(iop_chan);
 396	spin_unlock_bh(&iop_chan->lock);
 397
 398	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
 399		__func__, sw_desc->async_tx.cookie, sw_desc->idx);
 400
 401	return cookie;
 402}
 403
 404static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 405static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 406
  407/**
  408 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
  409 * @chan: allocate descriptor resources for this channel
  411 *
  412 * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
  413 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
  414 * greater than 2x the number of slots needed to satisfy a device->max_xor
  415 * request.
  416 */
 417static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 418{
 419	char *hw_desc;
 420	int idx;
 421	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 422	struct iop_adma_desc_slot *slot = NULL;
 423	int init = iop_chan->slots_allocated ? 0 : 1;
 424	struct iop_adma_platform_data *plat_data =
 425		dev_get_platdata(&iop_chan->device->pdev->dev);
 426	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
 427
 428	/* Allocate descriptor slots */
 429	do {
 430		idx = iop_chan->slots_allocated;
 431		if (idx == num_descs_in_pool)
 432			break;
 433
 434		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 435		if (!slot) {
 436			printk(KERN_INFO "IOP ADMA Channel only initialized"
 437				" %d descriptor slots", idx);
 438			break;
 439		}
 440		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
 441		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
 442
 443		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 444		slot->async_tx.tx_submit = iop_adma_tx_submit;
 445		INIT_LIST_HEAD(&slot->tx_list);
 446		INIT_LIST_HEAD(&slot->chain_node);
 447		INIT_LIST_HEAD(&slot->slot_node);
 448		hw_desc = (char *) iop_chan->device->dma_desc_pool;
 449		slot->async_tx.phys =
 450			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
 451		slot->idx = idx;
 452
 453		spin_lock_bh(&iop_chan->lock);
 454		iop_chan->slots_allocated++;
 455		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
 456		spin_unlock_bh(&iop_chan->lock);
 457	} while (iop_chan->slots_allocated < num_descs_in_pool);
 458
 459	if (idx && !iop_chan->last_used)
 460		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
 461					struct iop_adma_desc_slot,
 462					slot_node);
 463
 464	dev_dbg(iop_chan->device->common.dev,
 465		"allocated %d descriptor slots last_used: %p\n",
 466		iop_chan->slots_allocated, iop_chan->last_used);
 467
 468	/* initialize the channel and the chain with a null operation */
 469	if (init) {
 470		if (dma_has_cap(DMA_MEMCPY,
 471			iop_chan->device->common.cap_mask))
 472			iop_chan_start_null_memcpy(iop_chan);
 473		else if (dma_has_cap(DMA_XOR,
 474			iop_chan->device->common.cap_mask))
 475			iop_chan_start_null_xor(iop_chan);
 476		else
 477			BUG();
 478	}
 479
 480	return (idx > 0) ? idx : -ENOMEM;
 481}
 482
 483static struct dma_async_tx_descriptor *
 484iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 485{
 486	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 487	struct iop_adma_desc_slot *sw_desc, *grp_start;
 488	int slot_cnt, slots_per_op;
 489
 490	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 491
 492	spin_lock_bh(&iop_chan->lock);
 493	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
 494	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 495	if (sw_desc) {
 496		grp_start = sw_desc->group_head;
 497		iop_desc_init_interrupt(grp_start, iop_chan);
 498		sw_desc->async_tx.flags = flags;
 499	}
 500	spin_unlock_bh(&iop_chan->lock);
 501
 502	return sw_desc ? &sw_desc->async_tx : NULL;
 503}
 504
 505static struct dma_async_tx_descriptor *
 506iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 507			 dma_addr_t dma_src, size_t len, unsigned long flags)
 508{
 509	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 510	struct iop_adma_desc_slot *sw_desc, *grp_start;
 511	int slot_cnt, slots_per_op;
 512
 513	if (unlikely(!len))
 514		return NULL;
 515	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 516
 517	dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
 518		__func__, len);
 519
 520	spin_lock_bh(&iop_chan->lock);
 521	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
 522	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 523	if (sw_desc) {
 524		grp_start = sw_desc->group_head;
 525		iop_desc_init_memcpy(grp_start, flags);
 526		iop_desc_set_byte_count(grp_start, iop_chan, len);
 527		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 528		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 529		sw_desc->async_tx.flags = flags;
 530	}
 531	spin_unlock_bh(&iop_chan->lock);
 532
 533	return sw_desc ? &sw_desc->async_tx : NULL;
 534}
 535
 536static struct dma_async_tx_descriptor *
 537iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 538		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
 539		      unsigned long flags)
 540{
 541	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 542	struct iop_adma_desc_slot *sw_desc, *grp_start;
 543	int slot_cnt, slots_per_op;
 544
 545	if (unlikely(!len))
 546		return NULL;
 547	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 548
 549	dev_dbg(iop_chan->device->common.dev,
 550		"%s src_cnt: %d len: %zu flags: %lx\n",
 551		__func__, src_cnt, len, flags);
 552
 553	spin_lock_bh(&iop_chan->lock);
 554	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
 555	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 556	if (sw_desc) {
 557		grp_start = sw_desc->group_head;
 558		iop_desc_init_xor(grp_start, src_cnt, flags);
 559		iop_desc_set_byte_count(grp_start, iop_chan, len);
 560		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 561		sw_desc->async_tx.flags = flags;
 562		while (src_cnt--)
 563			iop_desc_set_xor_src_addr(grp_start, src_cnt,
 564						  dma_src[src_cnt]);
 565	}
 566	spin_unlock_bh(&iop_chan->lock);
 567
 568	return sw_desc ? &sw_desc->async_tx : NULL;
 569}
 570
 571static struct dma_async_tx_descriptor *
 572iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
 573			  unsigned int src_cnt, size_t len, u32 *result,
 574			  unsigned long flags)
 575{
 576	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 577	struct iop_adma_desc_slot *sw_desc, *grp_start;
 578	int slot_cnt, slots_per_op;
 579
 580	if (unlikely(!len))
 581		return NULL;
 582
 583	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
 584		__func__, src_cnt, len);
 585
 586	spin_lock_bh(&iop_chan->lock);
 587	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
 588	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 589	if (sw_desc) {
 590		grp_start = sw_desc->group_head;
 591		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
 592		iop_desc_set_zero_sum_byte_count(grp_start, len);
 593		grp_start->xor_check_result = result;
 594		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
 595			__func__, grp_start->xor_check_result);
 596		sw_desc->async_tx.flags = flags;
 597		while (src_cnt--)
 598			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
 599						       dma_src[src_cnt]);
 600	}
 601	spin_unlock_bh(&iop_chan->lock);
 602
 603	return sw_desc ? &sw_desc->async_tx : NULL;
 604}
 605
 606static struct dma_async_tx_descriptor *
 607iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 608		     unsigned int src_cnt, const unsigned char *scf, size_t len,
 609		     unsigned long flags)
 610{
 611	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 612	struct iop_adma_desc_slot *sw_desc, *g;
 613	int slot_cnt, slots_per_op;
 614	int continue_srcs;
 615
 616	if (unlikely(!len))
 617		return NULL;
 618	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 619
 620	dev_dbg(iop_chan->device->common.dev,
 621		"%s src_cnt: %d len: %zu flags: %lx\n",
 622		__func__, src_cnt, len, flags);
 623
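	/* when continuing a previous operation the old P and Q values are
	 * folded back in as extra sources, see the comment for dma_maxpq
	 * in include/linux/dmaengine.h
	 */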
 624	if (dmaf_p_disabled_continue(flags))
 625		continue_srcs = 1+src_cnt;
 626	else if (dmaf_continue(flags))
 627		continue_srcs = 3+src_cnt;
 628	else
 629		continue_srcs = 0+src_cnt;
 630
 631	spin_lock_bh(&iop_chan->lock);
 632	slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
 633	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 634	if (sw_desc) {
 635		int i;
 636
 637		g = sw_desc->group_head;
 638		iop_desc_set_byte_count(g, iop_chan, len);
 639
 640		/* even if P is disabled its destination address (bits
 641		 * [3:0]) must match Q.  It is ok if P points to an
 642		 * invalid address, it won't be written.
 643		 */
 644		if (flags & DMA_PREP_PQ_DISABLE_P)
 645			dst[0] = dst[1] & 0x7;
 646
 647		iop_desc_set_pq_addr(g, dst);
 648		sw_desc->async_tx.flags = flags;
 649		for (i = 0; i < src_cnt; i++)
 650			iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
 651
 652		/* if we are continuing a previous operation factor in
 653		 * the old p and q values, see the comment for dma_maxpq
 654		 * in include/linux/dmaengine.h
 655		 */
 656		if (dmaf_p_disabled_continue(flags))
 657			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
 658		else if (dmaf_continue(flags)) {
 659			iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
 660			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
 661			iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
 662		}
 663		iop_desc_init_pq(g, i, flags);
 664	}
 665	spin_unlock_bh(&iop_chan->lock);
 666
 667	return sw_desc ? &sw_desc->async_tx : NULL;
 668}
 669
 670static struct dma_async_tx_descriptor *
 671iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 672			 unsigned int src_cnt, const unsigned char *scf,
 673			 size_t len, enum sum_check_flags *pqres,
 674			 unsigned long flags)
 675{
 676	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 677	struct iop_adma_desc_slot *sw_desc, *g;
 678	int slot_cnt, slots_per_op;
 679
 680	if (unlikely(!len))
 681		return NULL;
 682	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 683
 684	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
 685		__func__, src_cnt, len);
 686
 687	spin_lock_bh(&iop_chan->lock);
 688	slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
 689	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 690	if (sw_desc) {
 691		/* for validate operations p and q are tagged onto the
 692		 * end of the source list
 693		 */
 694		int pq_idx = src_cnt;
 695
 696		g = sw_desc->group_head;
 697		iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
 698		iop_desc_set_pq_zero_sum_byte_count(g, len);
 699		g->pq_check_result = pqres;
 700		pr_debug("\t%s: g->pq_check_result: %p\n",
 701			__func__, g->pq_check_result);
 702		sw_desc->async_tx.flags = flags;
 703		while (src_cnt--)
 704			iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
 705							  src[src_cnt],
 706							  scf[src_cnt]);
 707		iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
 708	}
 709	spin_unlock_bh(&iop_chan->lock);
 710
 711	return sw_desc ? &sw_desc->async_tx : NULL;
 712}
 713
 714static void iop_adma_free_chan_resources(struct dma_chan *chan)
 715{
 716	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 717	struct iop_adma_desc_slot *iter, *_iter;
 718	int in_use_descs = 0;
 719
 720	iop_adma_slot_cleanup(iop_chan);
 721
 722	spin_lock_bh(&iop_chan->lock);
 723	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
 724					chain_node) {
 725		in_use_descs++;
 726		list_del(&iter->chain_node);
 727	}
 728	list_for_each_entry_safe_reverse(
 729		iter, _iter, &iop_chan->all_slots, slot_node) {
 730		list_del(&iter->slot_node);
 731		kfree(iter);
 732		iop_chan->slots_allocated--;
 733	}
 734	iop_chan->last_used = NULL;
 735
 736	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
 737		__func__, iop_chan->slots_allocated);
 738	spin_unlock_bh(&iop_chan->lock);
 739
 740	/* one is ok since we left it on there on purpose */
 741	if (in_use_descs > 1)
 742		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
 743			in_use_descs - 1);
 744}
 745
 746/**
 747 * iop_adma_status - poll the status of an ADMA transaction
 748 * @chan: ADMA channel handle
 749 * @cookie: ADMA transaction identifier
 750 * @txstate: a holder for the current state of the channel or NULL
 751 */
 752static enum dma_status iop_adma_status(struct dma_chan *chan,
 753					dma_cookie_t cookie,
 754					struct dma_tx_state *txstate)
 755{
 756	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 757	int ret;
 758
 759	ret = dma_cookie_status(chan, cookie, txstate);
 760	if (ret == DMA_COMPLETE)
 761		return ret;
 762
 763	iop_adma_slot_cleanup(iop_chan);
 764
 765	return dma_cookie_status(chan, cookie, txstate);
 766}
 767
 768static irqreturn_t iop_adma_eot_handler(int irq, void *data)
 769{
 770	struct iop_adma_chan *chan = data;
 771
 772	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 773
 774	tasklet_schedule(&chan->irq_tasklet);
 775
 776	iop_adma_device_clear_eot_status(chan);
 777
 778	return IRQ_HANDLED;
 779}
 780
 781static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
 782{
 783	struct iop_adma_chan *chan = data;
 784
 785	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 786
 787	tasklet_schedule(&chan->irq_tasklet);
 788
 789	iop_adma_device_clear_eoc_status(chan);
 790
 791	return IRQ_HANDLED;
 792}
 793
 794static irqreturn_t iop_adma_err_handler(int irq, void *data)
 795{
 796	struct iop_adma_chan *chan = data;
 797	unsigned long status = iop_chan_get_status(chan);
 798
 799	dev_err(chan->device->common.dev,
 800		"error ( %s%s%s%s%s%s%s)\n",
 801		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
 802		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
 803		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
 804		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
 805		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
 806		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
 807		iop_is_err_split_tx(status, chan) ? "split_tx " : "");
 808
 809	iop_adma_device_clear_err_status(chan);
 810
 811	BUG();
 812
 813	return IRQ_HANDLED;
 814}
 815
 816static void iop_adma_issue_pending(struct dma_chan *chan)
 817{
 818	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 819
 820	if (iop_chan->pending) {
 821		iop_chan->pending = 0;
 822		iop_chan_append(iop_chan);
 823	}
 824}
 825
 826/*
 827 * Perform a transaction to verify the HW works.
 828 */
 829#define IOP_ADMA_TEST_SIZE 2000
 830
 831static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
 832{
 833	int i;
 834	void *src, *dest;
 835	dma_addr_t src_dma, dest_dma;
 836	struct dma_chan *dma_chan;
 837	dma_cookie_t cookie;
 838	struct dma_async_tx_descriptor *tx;
 839	int err = 0;
 840	struct iop_adma_chan *iop_chan;
 841
 842	dev_dbg(device->common.dev, "%s\n", __func__);
 843
 844	src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
 845	if (!src)
 846		return -ENOMEM;
 847	dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
 848	if (!dest) {
 849		kfree(src);
 850		return -ENOMEM;
 851	}
 852
 853	/* Fill in src buffer */
 854	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
 855		((u8 *) src)[i] = (u8)i;
 856
 857	/* Start copy, using first DMA channel */
 858	dma_chan = container_of(device->common.channels.next,
 859				struct dma_chan,
 860				device_node);
 861	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 862		err = -ENODEV;
 863		goto out;
 864	}
 865
 866	dest_dma = dma_map_single(dma_chan->device->dev, dest,
 867				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
 868	src_dma = dma_map_single(dma_chan->device->dev, src,
 869				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
 870	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
 871				      IOP_ADMA_TEST_SIZE,
 872				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 873
 874	cookie = iop_adma_tx_submit(tx);
 875	iop_adma_issue_pending(dma_chan);
 876	msleep(1);
 877
 878	if (iop_adma_status(dma_chan, cookie, NULL) !=
 879			DMA_COMPLETE) {
 880		dev_err(dma_chan->device->dev,
 881			"Self-test copy timed out, disabling\n");
 882		err = -ENODEV;
 883		goto free_resources;
 884	}
 885
 886	iop_chan = to_iop_adma_chan(dma_chan);
 887	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
 888		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
 889	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
 890		dev_err(dma_chan->device->dev,
 891			"Self-test copy failed compare, disabling\n");
 892		err = -ENODEV;
 893		goto free_resources;
 894	}
 895
 896free_resources:
 897	iop_adma_free_chan_resources(dma_chan);
 898out:
 899	kfree(src);
 900	kfree(dest);
 901	return err;
 902}
 903
 904#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
 905static int
 906iop_adma_xor_val_self_test(struct iop_adma_device *device)
 907{
 908	int i, src_idx;
 909	struct page *dest;
 910	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
 911	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
 912	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
 913	dma_addr_t dest_dma;
 914	struct dma_async_tx_descriptor *tx;
 915	struct dma_chan *dma_chan;
 916	dma_cookie_t cookie;
 917	u8 cmp_byte = 0;
 918	u32 cmp_word;
 919	u32 zero_sum_result;
 920	int err = 0;
 921	struct iop_adma_chan *iop_chan;
 922
 923	dev_dbg(device->common.dev, "%s\n", __func__);
 924
 925	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
 926		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 927		if (!xor_srcs[src_idx]) {
 928			while (src_idx--)
 929				__free_page(xor_srcs[src_idx]);
 930			return -ENOMEM;
 931		}
 932	}
 933
 934	dest = alloc_page(GFP_KERNEL);
 935	if (!dest) {
 936		while (src_idx--)
 937			__free_page(xor_srcs[src_idx]);
 938		return -ENOMEM;
 939	}
 940
 941	/* Fill in src buffers */
 942	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
 943		u8 *ptr = page_address(xor_srcs[src_idx]);
 944		for (i = 0; i < PAGE_SIZE; i++)
 945			ptr[i] = (1 << src_idx);
 946	}
 947
 948	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
 949		cmp_byte ^= (u8) (1 << src_idx);
 950
 951	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
 952			(cmp_byte << 8) | cmp_byte;
 953
 954	memset(page_address(dest), 0, PAGE_SIZE);
 955
 956	dma_chan = container_of(device->common.channels.next,
 957				struct dma_chan,
 958				device_node);
 959	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 960		err = -ENODEV;
 961		goto out;
 962	}
 963
 964	/* test xor */
 965	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
 966				PAGE_SIZE, DMA_FROM_DEVICE);
 967	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
 968		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 969					   0, PAGE_SIZE, DMA_TO_DEVICE);
 970	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 971				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
 972				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 973
 974	cookie = iop_adma_tx_submit(tx);
 975	iop_adma_issue_pending(dma_chan);
 976	msleep(8);
 977
 978	if (iop_adma_status(dma_chan, cookie, NULL) !=
 979		DMA_COMPLETE) {
 980		dev_err(dma_chan->device->dev,
 981			"Self-test xor timed out, disabling\n");
 982		err = -ENODEV;
 983		goto free_resources;
 984	}
 985
 986	iop_chan = to_iop_adma_chan(dma_chan);
 987	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
 988		PAGE_SIZE, DMA_FROM_DEVICE);
 989	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 990		u32 *ptr = page_address(dest);
 991		if (ptr[i] != cmp_word) {
 992			dev_err(dma_chan->device->dev,
 993				"Self-test xor failed compare, disabling\n");
 994			err = -ENODEV;
 995			goto free_resources;
 996		}
 997	}
 998	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
 999		PAGE_SIZE, DMA_TO_DEVICE);
1000
1001	/* skip zero sum if the capability is not present */
1002	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1003		goto free_resources;
1004
 1005	/* zero sum the sources with the destination page */
1006	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1007		zero_sum_srcs[i] = xor_srcs[i];
1008	zero_sum_srcs[i] = dest;
1009
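	/* seed with a non-zero value; a successful zero-sum check writes 0 back */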
1010	zero_sum_result = 1;
1011
1012	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1013		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1014					   zero_sum_srcs[i], 0, PAGE_SIZE,
1015					   DMA_TO_DEVICE);
1016	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1017				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1018				       &zero_sum_result,
1019				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1020
1021	cookie = iop_adma_tx_submit(tx);
1022	iop_adma_issue_pending(dma_chan);
1023	msleep(8);
1024
1025	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1026		dev_err(dma_chan->device->dev,
1027			"Self-test zero sum timed out, disabling\n");
1028		err = -ENODEV;
1029		goto free_resources;
1030	}
1031
1032	if (zero_sum_result != 0) {
1033		dev_err(dma_chan->device->dev,
1034			"Self-test zero sum failed compare, disabling\n");
1035		err = -ENODEV;
1036		goto free_resources;
1037	}
1038
1039	/* test for non-zero parity sum */
1040	zero_sum_result = 0;
1041	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1042		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1043					   zero_sum_srcs[i], 0, PAGE_SIZE,
1044					   DMA_TO_DEVICE);
1045	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1046				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1047				       &zero_sum_result,
1048				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1049
1050	cookie = iop_adma_tx_submit(tx);
1051	iop_adma_issue_pending(dma_chan);
1052	msleep(8);
1053
1054	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1055		dev_err(dma_chan->device->dev,
1056			"Self-test non-zero sum timed out, disabling\n");
1057		err = -ENODEV;
1058		goto free_resources;
1059	}
1060
1061	if (zero_sum_result != 1) {
1062		dev_err(dma_chan->device->dev,
1063			"Self-test non-zero sum failed compare, disabling\n");
1064		err = -ENODEV;
1065		goto free_resources;
1066	}
1067
1068free_resources:
1069	iop_adma_free_chan_resources(dma_chan);
1070out:
1071	src_idx = IOP_ADMA_NUM_SRC_TEST;
1072	while (src_idx--)
1073		__free_page(xor_srcs[src_idx]);
1074	__free_page(dest);
1075	return err;
1076}
1077
1078#ifdef CONFIG_RAID6_PQ
1079static int
1080iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1081{
1082	/* combined sources, software pq results, and extra hw pq results */
1083	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
1084	/* ptr to the extra hw pq buffers defined above */
1085	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
1086	/* address conversion buffers (dma_map / page_address) */
1087	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
1088	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
1089	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
1090
1091	int i;
1092	struct dma_async_tx_descriptor *tx;
1093	struct dma_chan *dma_chan;
1094	dma_cookie_t cookie;
1095	u32 zero_sum_result;
1096	int err = 0;
1097	struct device *dev;
1098
1099	dev_dbg(device->common.dev, "%s\n", __func__);
1100
1101	for (i = 0; i < ARRAY_SIZE(pq); i++) {
1102		pq[i] = alloc_page(GFP_KERNEL);
1103		if (!pq[i]) {
1104			while (i--)
1105				__free_page(pq[i]);
1106			return -ENOMEM;
1107		}
1108	}
1109
1110	/* Fill in src buffers */
1111	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
1112		pq_sw[i] = page_address(pq[i]);
1113		memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
1114	}
1115	pq_sw[i] = page_address(pq[i]);
1116	pq_sw[i+1] = page_address(pq[i+1]);
1117
1118	dma_chan = container_of(device->common.channels.next,
1119				struct dma_chan,
1120				device_node);
1121	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1122		err = -ENODEV;
1123		goto out;
1124	}
1125
1126	dev = dma_chan->device->dev;
1127
1128	/* initialize the dests */
1129	memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
1130	memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);
1131
1132	/* test pq */
1133	pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1134	pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1135	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1136		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1137					 DMA_TO_DEVICE);
1138
1139	tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
1140				  IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
1141				  PAGE_SIZE,
1142				  DMA_PREP_INTERRUPT |
1143				  DMA_CTRL_ACK);
1144
1145	cookie = iop_adma_tx_submit(tx);
1146	iop_adma_issue_pending(dma_chan);
1147	msleep(8);
1148
1149	if (iop_adma_status(dma_chan, cookie, NULL) !=
1150		DMA_COMPLETE) {
1151		dev_err(dev, "Self-test pq timed out, disabling\n");
1152		err = -ENODEV;
1153		goto free_resources;
1154	}
1155
1156	raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
1157
1158	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
1159		   page_address(pq_hw[0]), PAGE_SIZE) != 0) {
1160		dev_err(dev, "Self-test p failed compare, disabling\n");
1161		err = -ENODEV;
1162		goto free_resources;
1163	}
1164	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
1165		   page_address(pq_hw[1]), PAGE_SIZE) != 0) {
1166		dev_err(dev, "Self-test q failed compare, disabling\n");
1167		err = -ENODEV;
1168		goto free_resources;
1169	}
1170
1171	/* test correct zero sum using the software generated pq values */
1172	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1173		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1174					 DMA_TO_DEVICE);
1175
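	/* seed with a non-zero value; a successful pq validation writes 0 back */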
1176	zero_sum_result = ~0;
1177	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1178				      pq_src, IOP_ADMA_NUM_SRC_TEST,
1179				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1180				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1181
1182	cookie = iop_adma_tx_submit(tx);
1183	iop_adma_issue_pending(dma_chan);
1184	msleep(8);
1185
1186	if (iop_adma_status(dma_chan, cookie, NULL) !=
1187		DMA_COMPLETE) {
1188		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1189		err = -ENODEV;
1190		goto free_resources;
1191	}
1192
1193	if (zero_sum_result != 0) {
1194		dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
1195			zero_sum_result);
1196		err = -ENODEV;
1197		goto free_resources;
1198	}
1199
1200	/* test incorrect zero sum */
1201	i = IOP_ADMA_NUM_SRC_TEST;
1202	memset(pq_sw[i] + 100, 0, 100);
1203	memset(pq_sw[i+1] + 200, 0, 200);
1204	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1205		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1206					 DMA_TO_DEVICE);
1207
1208	zero_sum_result = 0;
1209	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1210				      pq_src, IOP_ADMA_NUM_SRC_TEST,
1211				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1212				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1213
1214	cookie = iop_adma_tx_submit(tx);
1215	iop_adma_issue_pending(dma_chan);
1216	msleep(8);
1217
1218	if (iop_adma_status(dma_chan, cookie, NULL) !=
1219		DMA_COMPLETE) {
1220		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1221		err = -ENODEV;
1222		goto free_resources;
1223	}
1224
1225	if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
1226		dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
1227			zero_sum_result);
1228		err = -ENODEV;
1229		goto free_resources;
1230	}
1231
1232free_resources:
1233	iop_adma_free_chan_resources(dma_chan);
1234out:
1235	i = ARRAY_SIZE(pq);
1236	while (i--)
1237		__free_page(pq[i]);
1238	return err;
1239}
1240#endif
1241
1242static int iop_adma_remove(struct platform_device *dev)
1243{
1244	struct iop_adma_device *device = platform_get_drvdata(dev);
1245	struct dma_chan *chan, *_chan;
1246	struct iop_adma_chan *iop_chan;
1247	struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
1248
1249	dma_async_device_unregister(&device->common);
1250
1251	dma_free_coherent(&dev->dev, plat_data->pool_size,
1252			device->dma_desc_pool_virt, device->dma_desc_pool);
1253
1254	list_for_each_entry_safe(chan, _chan, &device->common.channels,
1255				device_node) {
1256		iop_chan = to_iop_adma_chan(chan);
1257		list_del(&chan->device_node);
1258		kfree(iop_chan);
1259	}
1260	kfree(device);
1261
1262	return 0;
1263}
1264
1265static int iop_adma_probe(struct platform_device *pdev)
1266{
1267	struct resource *res;
1268	int ret = 0, i;
1269	struct iop_adma_device *adev;
1270	struct iop_adma_chan *iop_chan;
1271	struct dma_device *dma_dev;
1272	struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
1273
1274	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1275	if (!res)
1276		return -ENODEV;
1277
1278	if (!devm_request_mem_region(&pdev->dev, res->start,
1279				resource_size(res), pdev->name))
1280		return -EBUSY;
1281
1282	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1283	if (!adev)
1284		return -ENOMEM;
1285	dma_dev = &adev->common;
1286
1287	/* allocate coherent memory for hardware descriptors
1288	 * note: writecombine gives slightly better performance, but
1289	 * requires that we explicitly flush the writes
1290	 */
1291	adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
1292						plat_data->pool_size,
1293						&adev->dma_desc_pool,
1294						GFP_KERNEL);
1295	if (!adev->dma_desc_pool_virt) {
1296		ret = -ENOMEM;
1297		goto err_free_adev;
1298	}
1299
1300	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1301		__func__, adev->dma_desc_pool_virt,
1302		(void *) adev->dma_desc_pool);
1303
1304	adev->id = plat_data->hw_id;
1305
 1306	/* discover transaction capabilities from the platform data */
1307	dma_dev->cap_mask = plat_data->cap_mask;
1308
1309	adev->pdev = pdev;
1310	platform_set_drvdata(pdev, adev);
1311
1312	INIT_LIST_HEAD(&dma_dev->channels);
1313
1314	/* set base routines */
1315	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1316	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1317	dma_dev->device_tx_status = iop_adma_status;
1318	dma_dev->device_issue_pending = iop_adma_issue_pending;
1319	dma_dev->dev = &pdev->dev;
1320
1321	/* set prep routines based on capability */
1322	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1323		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1324	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1325		dma_dev->max_xor = iop_adma_get_max_xor();
1326		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1327	}
1328	if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1329		dma_dev->device_prep_dma_xor_val =
1330			iop_adma_prep_dma_xor_val;
1331	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1332		dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1333		dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1334	}
1335	if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1336		dma_dev->device_prep_dma_pq_val =
1337			iop_adma_prep_dma_pq_val;
1338	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1339		dma_dev->device_prep_dma_interrupt =
1340			iop_adma_prep_dma_interrupt;
1341
1342	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1343	if (!iop_chan) {
1344		ret = -ENOMEM;
1345		goto err_free_dma;
1346	}
1347	iop_chan->device = adev;
1348
1349	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1350					resource_size(res));
1351	if (!iop_chan->mmr_base) {
1352		ret = -ENOMEM;
1353		goto err_free_iop_chan;
1354	}
1355	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1356		iop_chan);
1357
1358	/* clear errors before enabling interrupts */
1359	iop_adma_device_clear_err_status(iop_chan);
1360
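	/* the three platform irqs map to end-of-transfer, end-of-chain and
	 * error events, in that order
	 */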
1361	for (i = 0; i < 3; i++) {
1362		irq_handler_t handler[] = { iop_adma_eot_handler,
1363					iop_adma_eoc_handler,
1364					iop_adma_err_handler };
1365		int irq = platform_get_irq(pdev, i);
1366		if (irq < 0) {
1367			ret = -ENXIO;
1368			goto err_free_iop_chan;
1369		} else {
1370			ret = devm_request_irq(&pdev->dev, irq,
1371					handler[i], 0, pdev->name, iop_chan);
1372			if (ret)
1373				goto err_free_iop_chan;
1374		}
1375	}
1376
1377	spin_lock_init(&iop_chan->lock);
1378	INIT_LIST_HEAD(&iop_chan->chain);
1379	INIT_LIST_HEAD(&iop_chan->all_slots);
1380	iop_chan->common.device = dma_dev;
1381	dma_cookie_init(&iop_chan->common);
1382	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1383
1384	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1385		ret = iop_adma_memcpy_self_test(adev);
1386		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1387		if (ret)
1388			goto err_free_iop_chan;
1389	}
1390
1391	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1392		ret = iop_adma_xor_val_self_test(adev);
1393		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1394		if (ret)
1395			goto err_free_iop_chan;
1396	}
1397
1398	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1399	    dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1400		#ifdef CONFIG_RAID6_PQ
1401		ret = iop_adma_pq_zero_sum_self_test(adev);
1402		dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1403		#else
1404		/* can not test raid6, so do not publish capability */
1405		dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1406		dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1407		ret = 0;
1408		#endif
1409		if (ret)
1410			goto err_free_iop_chan;
1411	}
1412
1413	dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
1414		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1415		 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1416		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1417		 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1418		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1419		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1420
1421	dma_async_device_register(dma_dev);
1422	goto out;
1423
1424 err_free_iop_chan:
1425	kfree(iop_chan);
1426 err_free_dma:
1427	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1428			adev->dma_desc_pool_virt, adev->dma_desc_pool);
1429 err_free_adev:
1430	kfree(adev);
1431 out:
1432	return ret;
1433}
1434
1435static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1436{
1437	struct iop_adma_desc_slot *sw_desc, *grp_start;
1438	dma_cookie_t cookie;
1439	int slot_cnt, slots_per_op;
1440
1441	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1442
1443	spin_lock_bh(&iop_chan->lock);
1444	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1445	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1446	if (sw_desc) {
1447		grp_start = sw_desc->group_head;
1448
1449		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1450		async_tx_ack(&sw_desc->async_tx);
1451		iop_desc_init_memcpy(grp_start, 0);
1452		iop_desc_set_byte_count(grp_start, iop_chan, 0);
1453		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1454		iop_desc_set_memcpy_src_addr(grp_start, 0);
1455
1456		cookie = dma_cookie_assign(&sw_desc->async_tx);
1457
1458		/* initialize the completed cookie to be less than
1459		 * the most recently used cookie
1460		 */
1461		iop_chan->common.completed_cookie = cookie - 1;
1462
1463		/* channel should not be busy */
1464		BUG_ON(iop_chan_is_busy(iop_chan));
1465
1466		/* clear any prior error-status bits */
1467		iop_adma_device_clear_err_status(iop_chan);
1468
1469		/* disable operation */
1470		iop_chan_disable(iop_chan);
1471
1472		/* set the descriptor address */
1473		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1474
1475		/* 1/ don't add pre-chained descriptors
1476		 * 2/ dummy read to flush next_desc write
1477		 */
1478		BUG_ON(iop_desc_get_next_desc(sw_desc));
1479
1480		/* run the descriptor */
1481		iop_chan_enable(iop_chan);
1482	} else
1483		dev_err(iop_chan->device->common.dev,
1484			"failed to allocate null descriptor\n");
1485	spin_unlock_bh(&iop_chan->lock);
1486}
1487
1488static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1489{
1490	struct iop_adma_desc_slot *sw_desc, *grp_start;
1491	dma_cookie_t cookie;
1492	int slot_cnt, slots_per_op;
1493
1494	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1495
1496	spin_lock_bh(&iop_chan->lock);
1497	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1498	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1499	if (sw_desc) {
1500		grp_start = sw_desc->group_head;
1501		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1502		async_tx_ack(&sw_desc->async_tx);
1503		iop_desc_init_null_xor(grp_start, 2, 0);
1504		iop_desc_set_byte_count(grp_start, iop_chan, 0);
1505		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1506		iop_desc_set_xor_src_addr(grp_start, 0, 0);
1507		iop_desc_set_xor_src_addr(grp_start, 1, 0);
1508
1509		cookie = dma_cookie_assign(&sw_desc->async_tx);
1510
1511		/* initialize the completed cookie to be less than
1512		 * the most recently used cookie
1513		 */
1514		iop_chan->common.completed_cookie = cookie - 1;
1515
1516		/* channel should not be busy */
1517		BUG_ON(iop_chan_is_busy(iop_chan));
1518
1519		/* clear any prior error-status bits */
1520		iop_adma_device_clear_err_status(iop_chan);
1521
1522		/* disable operation */
1523		iop_chan_disable(iop_chan);
1524
1525		/* set the descriptor address */
1526		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1527
1528		/* 1/ don't add pre-chained descriptors
1529		 * 2/ dummy read to flush next_desc write
1530		 */
1531		BUG_ON(iop_desc_get_next_desc(sw_desc));
1532
1533		/* run the descriptor */
1534		iop_chan_enable(iop_chan);
1535	} else
1536		dev_err(iop_chan->device->common.dev,
1537			"failed to allocate null descriptor\n");
1538	spin_unlock_bh(&iop_chan->lock);
1539}
1540
1541static struct platform_driver iop_adma_driver = {
1542	.probe		= iop_adma_probe,
1543	.remove		= iop_adma_remove,
1544	.driver		= {
1545		.name	= "iop-adma",
1546	},
1547};
1548
1549module_platform_driver(iop_adma_driver);
1550
1551MODULE_AUTHOR("Intel Corporation");
1552MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1553MODULE_LICENSE("GPL");
1554MODULE_ALIAS("platform:iop-adma");
v4.10.11
 
   1/*
   2 * offload engine driver for the Intel Xscale series of i/o processors
   3 * Copyright © 2006, Intel Corporation.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 */
  15
  16/*
  17 * This driver supports the asynchrounous DMA copy and RAID engines available
  18 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
  19 */
  20
  21#include <linux/init.h>
  22#include <linux/module.h>
  23#include <linux/delay.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/spinlock.h>
  26#include <linux/interrupt.h>
  27#include <linux/platform_device.h>
 
  28#include <linux/memory.h>
  29#include <linux/ioport.h>
  30#include <linux/raid/pq.h>
  31#include <linux/slab.h>
  32
  33#include <mach/adma.h>
  34
  35#include "dmaengine.h"
  36
  37#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
  38#define to_iop_adma_device(dev) \
  39	container_of(dev, struct iop_adma_device, common)
  40#define tx_to_iop_adma_slot(tx) \
  41	container_of(tx, struct iop_adma_desc_slot, async_tx)
  42
  43/**
  44 * iop_adma_free_slots - flags descriptor slots for reuse
  45 * @slot: Slot to free
  46 * Caller must hold &iop_chan->lock while calling this function
  47 */
  48static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
  49{
  50	int stride = slot->slots_per_op;
  51
  52	while (stride--) {
  53		slot->slots_per_op = 0;
  54		slot = list_entry(slot->slot_node.next,
  55				struct iop_adma_desc_slot,
  56				slot_node);
  57	}
  58}
  59
  60static dma_cookie_t
  61iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
  62	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
  63{
  64	struct dma_async_tx_descriptor *tx = &desc->async_tx;
  65
  66	BUG_ON(tx->cookie < 0);
  67	if (tx->cookie > 0) {
  68		cookie = tx->cookie;
  69		tx->cookie = 0;
  70
  71		/* call the callback (must not sleep or submit new
  72		 * operations to this channel)
  73		 */
  74		dmaengine_desc_get_callback_invoke(tx, NULL);
  75
  76		dma_descriptor_unmap(tx);
  77		if (desc->group_head)
  78			desc->group_head = NULL;
  79	}
  80
  81	/* run dependent operations */
  82	dma_run_dependencies(tx);
  83
  84	return cookie;
  85}
  86
  87static int
  88iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
  89	struct iop_adma_chan *iop_chan)
  90{
  91	/* the client is allowed to attach dependent operations
  92	 * until 'ack' is set
  93	 */
  94	if (!async_tx_test_ack(&desc->async_tx))
  95		return 0;
  96
  97	/* leave the last descriptor in the chain
  98	 * so we can append to it
  99	 */
 100	if (desc->chain_node.next == &iop_chan->chain)
 101		return 1;
 102
 103	dev_dbg(iop_chan->device->common.dev,
 104		"\tfree slot: %d slots_per_op: %d\n",
 105		desc->idx, desc->slots_per_op);
 106
 107	list_del(&desc->chain_node);
 108	iop_adma_free_slots(desc);
 109
 110	return 0;
 111}
 112
 113static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 114{
 115	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
 116	dma_cookie_t cookie = 0;
 117	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
 118	int busy = iop_chan_is_busy(iop_chan);
 119	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
 120
 121	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 122	/* free completed slots from the chain starting with
 123	 * the oldest descriptor
 124	 */
 125	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
 126					chain_node) {
 127		pr_debug("\tcookie: %d slot: %d busy: %d "
 128			"this_desc: %#x next_desc: %#x ack: %d\n",
 129			iter->async_tx.cookie, iter->idx, busy,
 130			iter->async_tx.phys, iop_desc_get_next_desc(iter),
 131			async_tx_test_ack(&iter->async_tx));
 132		prefetch(_iter);
 133		prefetch(&_iter->async_tx);
 134
 135		/* do not advance past the current descriptor loaded into the
 136		 * hardware channel, subsequent descriptors are either in
 137		 * process or have not been submitted
 138		 */
 139		if (seen_current)
 140			break;
 141
 142		/* stop the search if we reach the current descriptor and the
 143		 * channel is busy, or if it appears that the current descriptor
 144		 * needs to be re-read (i.e. has been appended to)
 145		 */
 146		if (iter->async_tx.phys == current_desc) {
 147			BUG_ON(seen_current++);
 148			if (busy || iop_desc_get_next_desc(iter))
 149				break;
 150		}
 151
 152		/* detect the start of a group transaction */
 153		if (!slot_cnt && !slots_per_op) {
 154			slot_cnt = iter->slot_cnt;
 155			slots_per_op = iter->slots_per_op;
 156			if (slot_cnt <= slots_per_op) {
 157				slot_cnt = 0;
 158				slots_per_op = 0;
 159			}
 160		}
 161
 162		if (slot_cnt) {
 163			pr_debug("\tgroup++\n");
 164			if (!grp_start)
 165				grp_start = iter;
 166			slot_cnt -= slots_per_op;
 167		}
 168
 169		/* all the members of a group are complete */
 170		if (slots_per_op != 0 && slot_cnt == 0) {
 171			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
 172			int end_of_chain = 0;
 173			pr_debug("\tgroup end\n");
 174
 175			/* collect the total results */
 176			if (grp_start->xor_check_result) {
 177				u32 zero_sum_result = 0;
 178				slot_cnt = grp_start->slot_cnt;
 179				grp_iter = grp_start;
 180
 181				list_for_each_entry_from(grp_iter,
 182					&iop_chan->chain, chain_node) {
 183					zero_sum_result |=
 184					    iop_desc_get_zero_result(grp_iter);
 185					    pr_debug("\titer%d result: %d\n",
 186					    grp_iter->idx, zero_sum_result);
 187					slot_cnt -= slots_per_op;
 188					if (slot_cnt == 0)
 189						break;
 190				}
 191				pr_debug("\tgrp_start->xor_check_result: %p\n",
 192					grp_start->xor_check_result);
 193				*grp_start->xor_check_result = zero_sum_result;
 194			}
 195
 196			/* clean up the group */
 197			slot_cnt = grp_start->slot_cnt;
 198			grp_iter = grp_start;
 199			list_for_each_entry_safe_from(grp_iter, _grp_iter,
 200				&iop_chan->chain, chain_node) {
 201				cookie = iop_adma_run_tx_complete_actions(
 202					grp_iter, iop_chan, cookie);
 203
 204				slot_cnt -= slots_per_op;
 205				end_of_chain = iop_adma_clean_slot(grp_iter,
 206					iop_chan);
 207
 208				if (slot_cnt == 0 || end_of_chain)
 209					break;
 210			}
 211
 212			/* the group should be complete at this point */
 213			BUG_ON(slot_cnt);
 214
 215			slots_per_op = 0;
 216			grp_start = NULL;
 217			if (end_of_chain)
 218				break;
 219			else
 220				continue;
 221		} else if (slots_per_op) /* wait for group completion */
 222			continue;
 223
 224		/* write back zero sum results (single descriptor case) */
 225		if (iter->xor_check_result && iter->async_tx.cookie)
 226			*iter->xor_check_result =
 227				iop_desc_get_zero_result(iter);
 228
 229		cookie = iop_adma_run_tx_complete_actions(
 230					iter, iop_chan, cookie);
 231
 232		if (iop_adma_clean_slot(iter, iop_chan))
 233			break;
 234	}
 235
 236	if (cookie > 0) {
 237		iop_chan->common.completed_cookie = cookie;
 238		pr_debug("\tcompleted cookie %d\n", cookie);
 239	}
 240}
 241
 242static void
 243iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
 244{
 245	spin_lock_bh(&iop_chan->lock);
 246	__iop_adma_slot_cleanup(iop_chan);
 247	spin_unlock_bh(&iop_chan->lock);
 248}
 249
 250static void iop_adma_tasklet(unsigned long data)
 251{
 252	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
 253
 254	/* lockdep will flag dependency submissions as potentially
 255	 * recursive locking; this is not the case, as a dependency
 256	 * submission will never recurse into a channel's submit routine.
 257	 * There are checks in async_tx.c to prevent this.
 258	 */
 259	spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
 260	__iop_adma_slot_cleanup(iop_chan);
 261	spin_unlock(&iop_chan->lock);
 262}
 263
 264static struct iop_adma_desc_slot *
 265iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
 266			int slots_per_op)
 267{
 268	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
 269	LIST_HEAD(chain);
 270	int slots_found, retry = 0;
 271
 272	/* start the search from the last allocated descriptor; if a
 273	 * contiguous allocation cannot be found, start searching
 274	 * from the beginning of the list
 275	 */
 276retry:
 277	slots_found = 0;
 278	if (retry == 0)
 279		iter = iop_chan->last_used;
 280	else
 281		iter = list_entry(&iop_chan->all_slots,
 282			struct iop_adma_desc_slot,
 283			slot_node);
 284
 285	list_for_each_entry_safe_continue(
 286		iter, _iter, &iop_chan->all_slots, slot_node) {
 287		prefetch(_iter);
 288		prefetch(&_iter->async_tx);
 289		if (iter->slots_per_op) {
 290			/* give up after finding the first busy slot
 291			 * on the second pass through the list
 292			 */
 293			if (retry)
 294				break;
 295
 296			slots_found = 0;
 297			continue;
 298		}
 299
 300		/* start the allocation if the slot is correctly aligned */
 301		if (!slots_found++) {
 302			if (iop_desc_is_aligned(iter, slots_per_op))
 303				alloc_start = iter;
 304			else {
 305				slots_found = 0;
 306				continue;
 307			}
 308		}
 309
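		/* a contiguous, aligned run of free slots has been found:
		 * thread them onto a private list and record each slot's
		 * remaining span so iop_adma_free_slots() can release the
		 * whole operation later
		 */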
 310		if (slots_found == num_slots) {
 311			struct iop_adma_desc_slot *alloc_tail = NULL;
 312			struct iop_adma_desc_slot *last_used = NULL;
 313			iter = alloc_start;
 314			while (num_slots) {
 315				int i;
 316				dev_dbg(iop_chan->device->common.dev,
 317					"allocated slot: %d "
 318					"(desc %p phys: %#x) slots_per_op %d\n",
 319					iter->idx, iter->hw_desc,
 320					iter->async_tx.phys, slots_per_op);
 321
 322				/* pre-ack all but the last descriptor */
 323				if (num_slots != slots_per_op)
 324					async_tx_ack(&iter->async_tx);
 325
 326				list_add_tail(&iter->chain_node, &chain);
 327				alloc_tail = iter;
 328				iter->async_tx.cookie = 0;
 329				iter->slot_cnt = num_slots;
 330				iter->xor_check_result = NULL;
 331				for (i = 0; i < slots_per_op; i++) {
 332					iter->slots_per_op = slots_per_op - i;
 333					last_used = iter;
 334					iter = list_entry(iter->slot_node.next,
 335						struct iop_adma_desc_slot,
 336						slot_node);
 337				}
 338				num_slots -= slots_per_op;
 339			}
 340			alloc_tail->group_head = alloc_start;
 341			alloc_tail->async_tx.cookie = -EBUSY;
 342			list_splice(&chain, &alloc_tail->tx_list);
 343			iop_chan->last_used = last_used;
 344			iop_desc_clear_next_desc(alloc_start);
 345			iop_desc_clear_next_desc(alloc_tail);
 346			return alloc_tail;
 347		}
 348	}
 349	if (!retry++)
 350		goto retry;
 351
 352	/* perform direct reclaim if the allocation fails */
 353	__iop_adma_slot_cleanup(iop_chan);
 354
 355	return NULL;
 356}
 357
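/*
 * Batch descriptor appends: the hardware chain is only kicked once
 * IOP_ADMA_THRESHOLD descriptors are pending; iop_adma_issue_pending()
 * flushes any remainder explicitly.
 */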
 358static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
 359{
 360	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
 361		iop_chan->pending);
 362
 363	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
 364		iop_chan->pending = 0;
 365		iop_chan_append(iop_chan);
 366	}
 367}
 368
 369static dma_cookie_t
 370iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
 371{
 372	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
 373	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
 374	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
 375	int slot_cnt;
 376	int slots_per_op;
 377	dma_cookie_t cookie;
 378	dma_addr_t next_dma;
 379
 380	grp_start = sw_desc->group_head;
 381	slot_cnt = grp_start->slot_cnt;
 382	slots_per_op = grp_start->slots_per_op;
 383
 384	spin_lock_bh(&iop_chan->lock);
 385	cookie = dma_cookie_assign(tx);
 386
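	/* append the new descriptors after the current tail of the
	 * channel's software chain
	 */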
 387	old_chain_tail = list_entry(iop_chan->chain.prev,
 388		struct iop_adma_desc_slot, chain_node);
 389	list_splice_init(&sw_desc->tx_list,
 390			 &old_chain_tail->chain_node);
 391
 392	/* fix up the hardware chain */
 393	next_dma = grp_start->async_tx.phys;
 394	iop_desc_set_next_desc(old_chain_tail, next_dma);
 395	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
 396
 397	/* check for pre-chained descriptors */
 398	iop_paranoia(iop_desc_get_next_desc(sw_desc));
 399
 400	/* increment the pending count by the number of slots;
 401	 * memcpy operations have a 1:1 (slot:operation) relation,
 402	 * other operations are heavier and will trip the threshold
 403	 * more often.
 404	 */
 405	iop_chan->pending += slot_cnt;
 406	iop_adma_check_threshold(iop_chan);
 407	spin_unlock_bh(&iop_chan->lock);
 408
 409	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
 410		__func__, sw_desc->async_tx.cookie, sw_desc->idx);
 411
 412	return cookie;
 413}
 414
 415static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
 416static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
 417
 418	/**
 419	 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
 420	 * @chan: allocate descriptor resources for this channel
 422	 *
 423	 * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
 424	 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 425	 * greater than 2x the number of slots needed to satisfy a device->max_xor
 426	 * request.
 427	 */
 428static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 429{
 430	char *hw_desc;
 431	int idx;
 432	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 433	struct iop_adma_desc_slot *slot = NULL;
 434	int init = iop_chan->slots_allocated ? 0 : 1;
 435	struct iop_adma_platform_data *plat_data =
 436		dev_get_platdata(&iop_chan->device->pdev->dev);
 437	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
 438
 439	/* Allocate descriptor slots */
 440	do {
 441		idx = iop_chan->slots_allocated;
 442		if (idx == num_descs_in_pool)
 443			break;
 444
 445		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 446		if (!slot) {
 447			printk(KERN_INFO "IOP ADMA Channel only initialized"
 448				" %d descriptor slots\n", idx);
 449			break;
 450		}
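		/* carve this slot's hardware descriptor out of the coherent
		 * pool; the matching bus address is computed the same way
		 * below
		 */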
 451		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
 452		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
 453
 454		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 455		slot->async_tx.tx_submit = iop_adma_tx_submit;
 456		INIT_LIST_HEAD(&slot->tx_list);
 457		INIT_LIST_HEAD(&slot->chain_node);
 458		INIT_LIST_HEAD(&slot->slot_node);
 459		hw_desc = (char *) iop_chan->device->dma_desc_pool;
 460		slot->async_tx.phys =
 461			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
 462		slot->idx = idx;
 463
 464		spin_lock_bh(&iop_chan->lock);
 465		iop_chan->slots_allocated++;
 466		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
 467		spin_unlock_bh(&iop_chan->lock);
 468	} while (iop_chan->slots_allocated < num_descs_in_pool);
 469
 470	if (idx && !iop_chan->last_used)
 471		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
 472					struct iop_adma_desc_slot,
 473					slot_node);
 474
 475	dev_dbg(iop_chan->device->common.dev,
 476		"allocated %d descriptor slots last_used: %p\n",
 477		iop_chan->slots_allocated, iop_chan->last_used);
 478
 479	/* initialize the channel and the chain with a null operation */
 480	if (init) {
 481		if (dma_has_cap(DMA_MEMCPY,
 482			iop_chan->device->common.cap_mask))
 483			iop_chan_start_null_memcpy(iop_chan);
 484		else if (dma_has_cap(DMA_XOR,
 485			iop_chan->device->common.cap_mask))
 486			iop_chan_start_null_xor(iop_chan);
 487		else
 488			BUG();
 489	}
 490
 491	return (idx > 0) ? idx : -ENOMEM;
 492}
 493
 494static struct dma_async_tx_descriptor *
 495iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 496{
 497	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 498	struct iop_adma_desc_slot *sw_desc, *grp_start;
 499	int slot_cnt, slots_per_op;
 500
 501	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 502
 503	spin_lock_bh(&iop_chan->lock);
 504	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
 505	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 506	if (sw_desc) {
 507		grp_start = sw_desc->group_head;
 508		iop_desc_init_interrupt(grp_start, iop_chan);
 509		sw_desc->async_tx.flags = flags;
 510	}
 511	spin_unlock_bh(&iop_chan->lock);
 512
 513	return sw_desc ? &sw_desc->async_tx : NULL;
 514}
 515
 516static struct dma_async_tx_descriptor *
 517iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 518			 dma_addr_t dma_src, size_t len, unsigned long flags)
 519{
 520	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 521	struct iop_adma_desc_slot *sw_desc, *grp_start;
 522	int slot_cnt, slots_per_op;
 523
 524	if (unlikely(!len))
 525		return NULL;
 526	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
 527
 528	dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
 529		__func__, len);
 530
 531	spin_lock_bh(&iop_chan->lock);
 532	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
 533	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 534	if (sw_desc) {
 535		grp_start = sw_desc->group_head;
 536		iop_desc_init_memcpy(grp_start, flags);
 537		iop_desc_set_byte_count(grp_start, iop_chan, len);
 538		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 539		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
 540		sw_desc->async_tx.flags = flags;
 541	}
 542	spin_unlock_bh(&iop_chan->lock);
 543
 544	return sw_desc ? &sw_desc->async_tx : NULL;
 545}
 546
 547static struct dma_async_tx_descriptor *
 548iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 549		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
 550		      unsigned long flags)
 551{
 552	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 553	struct iop_adma_desc_slot *sw_desc, *grp_start;
 554	int slot_cnt, slots_per_op;
 555
 556	if (unlikely(!len))
 557		return NULL;
 558	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 559
 560	dev_dbg(iop_chan->device->common.dev,
 561		"%s src_cnt: %d len: %zu flags: %lx\n",
 562		__func__, src_cnt, len, flags);
 563
 564	spin_lock_bh(&iop_chan->lock);
 565	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
 566	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 567	if (sw_desc) {
 568		grp_start = sw_desc->group_head;
 569		iop_desc_init_xor(grp_start, src_cnt, flags);
 570		iop_desc_set_byte_count(grp_start, iop_chan, len);
 571		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
 572		sw_desc->async_tx.flags = flags;
 573		while (src_cnt--)
 574			iop_desc_set_xor_src_addr(grp_start, src_cnt,
 575						  dma_src[src_cnt]);
 576	}
 577	spin_unlock_bh(&iop_chan->lock);
 578
 579	return sw_desc ? &sw_desc->async_tx : NULL;
 580}
 581
 582static struct dma_async_tx_descriptor *
 583iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
 584			  unsigned int src_cnt, size_t len, u32 *result,
 585			  unsigned long flags)
 586{
 587	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 588	struct iop_adma_desc_slot *sw_desc, *grp_start;
 589	int slot_cnt, slots_per_op;
 590
 591	if (unlikely(!len))
 592		return NULL;
 593
 594	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
 595		__func__, src_cnt, len);
 596
 597	spin_lock_bh(&iop_chan->lock);
 598	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
 599	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 600	if (sw_desc) {
 601		grp_start = sw_desc->group_head;
 602		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
 603		iop_desc_set_zero_sum_byte_count(grp_start, len);
 604		grp_start->xor_check_result = result;
 605		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
 606			__func__, grp_start->xor_check_result);
 607		sw_desc->async_tx.flags = flags;
 608		while (src_cnt--)
 609			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
 610						       dma_src[src_cnt]);
 611	}
 612	spin_unlock_bh(&iop_chan->lock);
 613
 614	return sw_desc ? &sw_desc->async_tx : NULL;
 615}
 616
 617static struct dma_async_tx_descriptor *
 618iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 619		     unsigned int src_cnt, const unsigned char *scf, size_t len,
 620		     unsigned long flags)
 621{
 622	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 623	struct iop_adma_desc_slot *sw_desc, *g;
 624	int slot_cnt, slots_per_op;
 625	int continue_srcs;
 626
 627	if (unlikely(!len))
 628		return NULL;
 629	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 630
 631	dev_dbg(iop_chan->device->common.dev,
 632		"%s src_cnt: %d len: %zu flags: %lx\n",
 633		__func__, src_cnt, len, flags);
 634
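	/* a continuation re-reads the previous results as extra sources:
	 * one extra source (old Q) when P is disabled, three when both
	 * P and Q are carried forward (see the source setup below)
	 */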
 635	if (dmaf_p_disabled_continue(flags))
 636		continue_srcs = 1+src_cnt;
 637	else if (dmaf_continue(flags))
 638		continue_srcs = 3+src_cnt;
 639	else
 640		continue_srcs = 0+src_cnt;
 641
 642	spin_lock_bh(&iop_chan->lock);
 643	slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
 644	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 645	if (sw_desc) {
 646		int i;
 647
 648		g = sw_desc->group_head;
 649		iop_desc_set_byte_count(g, iop_chan, len);
 650
 651		/* even if P is disabled its destination address (bits
 652		 * [3:0]) must match Q.  It is ok if P points to an
 653		 * invalid address; it won't be written.
 654		 */
 655		if (flags & DMA_PREP_PQ_DISABLE_P)
 656			dst[0] = dst[1] & 0x7;
 657
 658		iop_desc_set_pq_addr(g, dst);
 659		sw_desc->async_tx.flags = flags;
 660		for (i = 0; i < src_cnt; i++)
 661			iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
 662
 663		/* if we are continuing a previous operation factor in
 664		 * the old p and q values, see the comment for dma_maxpq
 665		 * in include/linux/dmaengine.h
 666		 */
 667		if (dmaf_p_disabled_continue(flags))
 668			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
 669		else if (dmaf_continue(flags)) {
 670			iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
 671			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
 672			iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
 673		}
 674		iop_desc_init_pq(g, i, flags);
 675	}
 676	spin_unlock_bh(&iop_chan->lock);
 677
 678	return sw_desc ? &sw_desc->async_tx : NULL;
 679}
 680
 681static struct dma_async_tx_descriptor *
 682iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 683			 unsigned int src_cnt, const unsigned char *scf,
 684			 size_t len, enum sum_check_flags *pqres,
 685			 unsigned long flags)
 686{
 687	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 688	struct iop_adma_desc_slot *sw_desc, *g;
 689	int slot_cnt, slots_per_op;
 690
 691	if (unlikely(!len))
 692		return NULL;
 693	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
 694
 695	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
 696		__func__, src_cnt, len);
 697
 698	spin_lock_bh(&iop_chan->lock);
 699	slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
 700	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 701	if (sw_desc) {
 702		/* for validate operations p and q are tagged onto the
 703		 * end of the source list
 704		 */
 705		int pq_idx = src_cnt;
 706
 707		g = sw_desc->group_head;
 708		iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
 709		iop_desc_set_pq_zero_sum_byte_count(g, len);
 710		g->pq_check_result = pqres;
 711		pr_debug("\t%s: g->pq_check_result: %p\n",
 712			__func__, g->pq_check_result);
 713		sw_desc->async_tx.flags = flags;
 714		while (src_cnt--)
 715			iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
 716							  src[src_cnt],
 717							  scf[src_cnt]);
 718		iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
 719	}
 720	spin_unlock_bh(&iop_chan->lock);
 721
 722	return sw_desc ? &sw_desc->async_tx : NULL;
 723}
 724
 725static void iop_adma_free_chan_resources(struct dma_chan *chan)
 726{
 727	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 728	struct iop_adma_desc_slot *iter, *_iter;
 729	int in_use_descs = 0;
 730
 731	iop_adma_slot_cleanup(iop_chan);
 732
 733	spin_lock_bh(&iop_chan->lock);
 734	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
 735					chain_node) {
 736		in_use_descs++;
 737		list_del(&iter->chain_node);
 738	}
 739	list_for_each_entry_safe_reverse(
 740		iter, _iter, &iop_chan->all_slots, slot_node) {
 741		list_del(&iter->slot_node);
 742		kfree(iter);
 743		iop_chan->slots_allocated--;
 744	}
 745	iop_chan->last_used = NULL;
 746
 747	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
 748		__func__, iop_chan->slots_allocated);
 749	spin_unlock_bh(&iop_chan->lock);
 750
 751	/* one is ok since we left it on the chain on purpose */
 752	if (in_use_descs > 1)
 753		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
 754			in_use_descs - 1);
 755}
 756
 757/**
 758 * iop_adma_status - poll the status of an ADMA transaction
 759 * @chan: ADMA channel handle
 760 * @cookie: ADMA transaction identifier
 761 * @txstate: a holder for the current state of the channel or NULL
 762 */
 763static enum dma_status iop_adma_status(struct dma_chan *chan,
 764					dma_cookie_t cookie,
 765					struct dma_tx_state *txstate)
 766{
 767	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 768	int ret;
 769
 770	ret = dma_cookie_status(chan, cookie, txstate);
 771	if (ret == DMA_COMPLETE)
 772		return ret;
 773
 774	iop_adma_slot_cleanup(iop_chan);
 775
 776	return dma_cookie_status(chan, cookie, txstate);
 777}
 778
 779static irqreturn_t iop_adma_eot_handler(int irq, void *data)
 780{
 781	struct iop_adma_chan *chan = data;
 782
 783	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 784
 785	tasklet_schedule(&chan->irq_tasklet);
 786
 787	iop_adma_device_clear_eot_status(chan);
 788
 789	return IRQ_HANDLED;
 790}
 791
 792static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
 793{
 794	struct iop_adma_chan *chan = data;
 795
 796	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 797
 798	tasklet_schedule(&chan->irq_tasklet);
 799
 800	iop_adma_device_clear_eoc_status(chan);
 801
 802	return IRQ_HANDLED;
 803}
 804
 805static irqreturn_t iop_adma_err_handler(int irq, void *data)
 806{
 807	struct iop_adma_chan *chan = data;
 808	unsigned long status = iop_chan_get_status(chan);
 809
 810	dev_err(chan->device->common.dev,
 811		"error ( %s%s%s%s%s%s%s)\n",
 812		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
 813		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
 814		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
 815		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
 816		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
 817		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
 818		iop_is_err_split_tx(status, chan) ? "split_tx " : "");
 819
 820	iop_adma_device_clear_err_status(chan);
 821
 822	BUG();
 823
 824	return IRQ_HANDLED;
 825}
 826
 827static void iop_adma_issue_pending(struct dma_chan *chan)
 828{
 829	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 830
 831	if (iop_chan->pending) {
 832		iop_chan->pending = 0;
 833		iop_chan_append(iop_chan);
 834	}
 835}
 836
 837/*
 838 * Perform a transaction to verify the HW works.
 839 */
 840#define IOP_ADMA_TEST_SIZE 2000
 841
 842static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
 843{
 844	int i;
 845	void *src, *dest;
 846	dma_addr_t src_dma, dest_dma;
 847	struct dma_chan *dma_chan;
 848	dma_cookie_t cookie;
 849	struct dma_async_tx_descriptor *tx;
 850	int err = 0;
 851	struct iop_adma_chan *iop_chan;
 852
 853	dev_dbg(device->common.dev, "%s\n", __func__);
 854
 855	src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
 856	if (!src)
 857		return -ENOMEM;
 858	dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
 859	if (!dest) {
 860		kfree(src);
 861		return -ENOMEM;
 862	}
 863
 864	/* Fill in src buffer */
 865	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
 866		((u8 *) src)[i] = (u8)i;
 867
 868	/* Start copy, using first DMA channel */
 869	dma_chan = container_of(device->common.channels.next,
 870				struct dma_chan,
 871				device_node);
 872	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 873		err = -ENODEV;
 874		goto out;
 875	}
 876
 877	dest_dma = dma_map_single(dma_chan->device->dev, dest,
 878				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
 879	src_dma = dma_map_single(dma_chan->device->dev, src,
 880				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
 881	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
 882				      IOP_ADMA_TEST_SIZE,
 883				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 884
 885	cookie = iop_adma_tx_submit(tx);
 886	iop_adma_issue_pending(dma_chan);
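	/* give the engine a moment to complete before polling status */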
 887	msleep(1);
 888
 889	if (iop_adma_status(dma_chan, cookie, NULL) !=
 890			DMA_COMPLETE) {
 891		dev_err(dma_chan->device->dev,
 892			"Self-test copy timed out, disabling\n");
 893		err = -ENODEV;
 894		goto free_resources;
 895	}
 896
 897	iop_chan = to_iop_adma_chan(dma_chan);
 898	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
 899		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
 900	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
 901		dev_err(dma_chan->device->dev,
 902			"Self-test copy failed compare, disabling\n");
 903		err = -ENODEV;
 904		goto free_resources;
 905	}
 906
 907free_resources:
 908	iop_adma_free_chan_resources(dma_chan);
 909out:
 910	kfree(src);
 911	kfree(dest);
 912	return err;
 913}
 914
 915#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
 916static int
 917iop_adma_xor_val_self_test(struct iop_adma_device *device)
 918{
 919	int i, src_idx;
 920	struct page *dest;
 921	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
 922	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
 923	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
 924	dma_addr_t dest_dma;
 925	struct dma_async_tx_descriptor *tx;
 926	struct dma_chan *dma_chan;
 927	dma_cookie_t cookie;
 928	u8 cmp_byte = 0;
 929	u32 cmp_word;
 930	u32 zero_sum_result;
 931	int err = 0;
 932	struct iop_adma_chan *iop_chan;
 933
 934	dev_dbg(device->common.dev, "%s\n", __func__);
 935
 936	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
 937		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 938		if (!xor_srcs[src_idx]) {
 939			while (src_idx--)
 940				__free_page(xor_srcs[src_idx]);
 941			return -ENOMEM;
 942		}
 943	}
 944
 945	dest = alloc_page(GFP_KERNEL);
 946	if (!dest) {
 947		while (src_idx--)
 948			__free_page(xor_srcs[src_idx]);
 949		return -ENOMEM;
 950	}
 951
 952	/* Fill in src buffers */
 953	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
 954		u8 *ptr = page_address(xor_srcs[src_idx]);
 955		for (i = 0; i < PAGE_SIZE; i++)
 956			ptr[i] = (1 << src_idx);
 957	}
 958
 959	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
 960		cmp_byte ^= (u8) (1 << src_idx);
 961
 962	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
 963			(cmp_byte << 8) | cmp_byte;
 964
 965	memset(page_address(dest), 0, PAGE_SIZE);
 966
 967	dma_chan = container_of(device->common.channels.next,
 968				struct dma_chan,
 969				device_node);
 970	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 971		err = -ENODEV;
 972		goto out;
 973	}
 974
 975	/* test xor */
 976	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
 977				PAGE_SIZE, DMA_FROM_DEVICE);
 978	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
 979		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 980					   0, PAGE_SIZE, DMA_TO_DEVICE);
 981	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 982				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
 983				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 984
 985	cookie = iop_adma_tx_submit(tx);
 986	iop_adma_issue_pending(dma_chan);
 987	msleep(8);
 988
 989	if (iop_adma_status(dma_chan, cookie, NULL) !=
 990		DMA_COMPLETE) {
 991		dev_err(dma_chan->device->dev,
 992			"Self-test xor timed out, disabling\n");
 993		err = -ENODEV;
 994		goto free_resources;
 995	}
 996
 997	iop_chan = to_iop_adma_chan(dma_chan);
 998	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
 999		PAGE_SIZE, DMA_FROM_DEVICE);
1000	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1001		u32 *ptr = page_address(dest);
1002		if (ptr[i] != cmp_word) {
1003			dev_err(dma_chan->device->dev,
1004				"Self-test xor failed compare, disabling\n");
1005			err = -ENODEV;
1006			goto free_resources;
1007		}
1008	}
1009	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
1010		PAGE_SIZE, DMA_TO_DEVICE);
1011
1012	/* skip zero sum if the capability is not present */
1013	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1014		goto free_resources;
1015
1016	/* zero sum the sources with the destination page */
1017	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1018		zero_sum_srcs[i] = xor_srcs[i];
1019	zero_sum_srcs[i] = dest;
1020
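	/* preset to a failing value; the engine must write 0 for an
	 * all-zero XOR of the sources and destination
	 */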
1021	zero_sum_result = 1;
1022
1023	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1024		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1025					   zero_sum_srcs[i], 0, PAGE_SIZE,
1026					   DMA_TO_DEVICE);
1027	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1028				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1029				       &zero_sum_result,
1030				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1031
1032	cookie = iop_adma_tx_submit(tx);
1033	iop_adma_issue_pending(dma_chan);
1034	msleep(8);
1035
1036	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1037		dev_err(dma_chan->device->dev,
1038			"Self-test zero sum timed out, disabling\n");
1039		err = -ENODEV;
1040		goto free_resources;
1041	}
1042
1043	if (zero_sum_result != 0) {
1044		dev_err(dma_chan->device->dev,
1045			"Self-test zero sum failed compare, disabling\n");
1046		err = -ENODEV;
1047		goto free_resources;
1048	}
1049
1050	/* test for non-zero parity sum */
1051	zero_sum_result = 0;
1052	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1053		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1054					   zero_sum_srcs[i], 0, PAGE_SIZE,
1055					   DMA_TO_DEVICE);
1056	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1057				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1058				       &zero_sum_result,
1059				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1060
1061	cookie = iop_adma_tx_submit(tx);
1062	iop_adma_issue_pending(dma_chan);
1063	msleep(8);
1064
1065	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1066		dev_err(dma_chan->device->dev,
1067			"Self-test non-zero sum timed out, disabling\n");
1068		err = -ENODEV;
1069		goto free_resources;
1070	}
1071
1072	if (zero_sum_result != 1) {
1073		dev_err(dma_chan->device->dev,
1074			"Self-test non-zero sum failed compare, disabling\n");
1075		err = -ENODEV;
1076		goto free_resources;
1077	}
1078
1079free_resources:
1080	iop_adma_free_chan_resources(dma_chan);
1081out:
1082	src_idx = IOP_ADMA_NUM_SRC_TEST;
1083	while (src_idx--)
1084		__free_page(xor_srcs[src_idx]);
1085	__free_page(dest);
1086	return err;
1087}
1088
1089#ifdef CONFIG_RAID6_PQ
1090static int
1091iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1092{
1093	/* combined sources, software pq results, and extra hw pq results */
1094	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
1095	/* ptr to the extra hw pq buffers defined above */
1096	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
1097	/* address conversion buffers (dma_map / page_address) */
1098	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
1099	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
1100	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
1101
1102	int i;
1103	struct dma_async_tx_descriptor *tx;
1104	struct dma_chan *dma_chan;
1105	dma_cookie_t cookie;
1106	u32 zero_sum_result;
1107	int err = 0;
1108	struct device *dev;
1109
1110	dev_dbg(device->common.dev, "%s\n", __func__);
1111
1112	for (i = 0; i < ARRAY_SIZE(pq); i++) {
1113		pq[i] = alloc_page(GFP_KERNEL);
1114		if (!pq[i]) {
1115			while (i--)
1116				__free_page(pq[i]);
1117			return -ENOMEM;
1118		}
1119	}
1120
1121	/* Fill in src buffers */
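	/* note: memset() only uses the low byte of its value, so page i is
	 * filled with the repeating byte (1 << i)
	 */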
1122	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
1123		pq_sw[i] = page_address(pq[i]);
1124		memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
1125	}
1126	pq_sw[i] = page_address(pq[i]);
1127	pq_sw[i+1] = page_address(pq[i+1]);
1128
1129	dma_chan = container_of(device->common.channels.next,
1130				struct dma_chan,
1131				device_node);
1132	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1133		err = -ENODEV;
1134		goto out;
1135	}
1136
1137	dev = dma_chan->device->dev;
1138
1139	/* initialize the dests */
1140	memset(page_address(pq_hw[0]), 0, PAGE_SIZE);
1141	memset(page_address(pq_hw[1]), 0, PAGE_SIZE);
1142
1143	/* test pq */
1144	pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1145	pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1146	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1147		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1148					 DMA_TO_DEVICE);
1149
1150	tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
1151				  IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
1152				  PAGE_SIZE,
1153				  DMA_PREP_INTERRUPT |
1154				  DMA_CTRL_ACK);
1155
1156	cookie = iop_adma_tx_submit(tx);
1157	iop_adma_issue_pending(dma_chan);
1158	msleep(8);
1159
1160	if (iop_adma_status(dma_chan, cookie, NULL) !=
1161		DMA_COMPLETE) {
1162		dev_err(dev, "Self-test pq timed out, disabling\n");
1163		err = -ENODEV;
1164		goto free_resources;
1165	}
1166
1167	raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
1168
1169	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
1170		   page_address(pq_hw[0]), PAGE_SIZE) != 0) {
1171		dev_err(dev, "Self-test p failed compare, disabling\n");
1172		err = -ENODEV;
1173		goto free_resources;
1174	}
1175	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
1176		   page_address(pq_hw[1]), PAGE_SIZE) != 0) {
1177		dev_err(dev, "Self-test q failed compare, disabling\n");
1178		err = -ENODEV;
1179		goto free_resources;
1180	}
1181
1182	/* test correct zero sum using the software generated pq values */
1183	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1184		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1185					 DMA_TO_DEVICE);
1186
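	/* preset to all ones; a successful validation must clear it */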
1187	zero_sum_result = ~0;
1188	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1189				      pq_src, IOP_ADMA_NUM_SRC_TEST,
1190				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1191				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1192
1193	cookie = iop_adma_tx_submit(tx);
1194	iop_adma_issue_pending(dma_chan);
1195	msleep(8);
1196
1197	if (iop_adma_status(dma_chan, cookie, NULL) !=
1198		DMA_COMPLETE) {
1199		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1200		err = -ENODEV;
1201		goto free_resources;
1202	}
1203
1204	if (zero_sum_result != 0) {
1205		dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
1206			zero_sum_result);
1207		err = -ENODEV;
1208		goto free_resources;
1209	}
1210
1211	/* test incorrect zero sum */
1212	i = IOP_ADMA_NUM_SRC_TEST;
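	/* corrupt part of the software-generated P and Q pages so the
	 * hardware check must flag a mismatch on both
	 */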
1213	memset(pq_sw[i] + 100, 0, 100);
1214	memset(pq_sw[i+1] + 200, 0, 200);
1215	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1216		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1217					 DMA_TO_DEVICE);
1218
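	/* preset to 'pass'; the engine must now report both P and Q errors */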
1219	zero_sum_result = 0;
1220	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1221				      pq_src, IOP_ADMA_NUM_SRC_TEST,
1222				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1223				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1224
1225	cookie = iop_adma_tx_submit(tx);
1226	iop_adma_issue_pending(dma_chan);
1227	msleep(8);
1228
1229	if (iop_adma_status(dma_chan, cookie, NULL) !=
1230		DMA_COMPLETE) {
1231		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1232		err = -ENODEV;
1233		goto free_resources;
1234	}
1235
1236	if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
1237		dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
1238			zero_sum_result);
1239		err = -ENODEV;
1240		goto free_resources;
1241	}
1242
1243free_resources:
1244	iop_adma_free_chan_resources(dma_chan);
1245out:
1246	i = ARRAY_SIZE(pq);
1247	while (i--)
1248		__free_page(pq[i]);
1249	return err;
1250}
1251#endif
1252
1253static int iop_adma_remove(struct platform_device *dev)
1254{
1255	struct iop_adma_device *device = platform_get_drvdata(dev);
1256	struct dma_chan *chan, *_chan;
1257	struct iop_adma_chan *iop_chan;
1258	struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
1259
1260	dma_async_device_unregister(&device->common);
1261
1262	dma_free_coherent(&dev->dev, plat_data->pool_size,
1263			device->dma_desc_pool_virt, device->dma_desc_pool);
1264
1265	list_for_each_entry_safe(chan, _chan, &device->common.channels,
1266				device_node) {
1267		iop_chan = to_iop_adma_chan(chan);
1268		list_del(&chan->device_node);
1269		kfree(iop_chan);
1270	}
1271	kfree(device);
1272
1273	return 0;
1274}
1275
1276static int iop_adma_probe(struct platform_device *pdev)
1277{
1278	struct resource *res;
1279	int ret = 0, i;
1280	struct iop_adma_device *adev;
1281	struct iop_adma_chan *iop_chan;
1282	struct dma_device *dma_dev;
1283	struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
1284
1285	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1286	if (!res)
1287		return -ENODEV;
1288
1289	if (!devm_request_mem_region(&pdev->dev, res->start,
1290				resource_size(res), pdev->name))
1291		return -EBUSY;
1292
1293	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1294	if (!adev)
1295		return -ENOMEM;
1296	dma_dev = &adev->common;
1297
1298	/* allocate coherent memory for hardware descriptors
1299	 * note: writecombine gives slightly better performance, but
1300	 * requires that we explicitly flush the writes
1301	 */
1302	adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
1303						plat_data->pool_size,
1304						&adev->dma_desc_pool,
1305						GFP_KERNEL);
1306	if (!adev->dma_desc_pool_virt) {
1307		ret = -ENOMEM;
1308		goto err_free_adev;
1309	}
1310
1311	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1312		__func__, adev->dma_desc_pool_virt,
1313		(void *) adev->dma_desc_pool);
1314
1315	adev->id = plat_data->hw_id;
1316
1317	/* discover transaction capabilities from the platform data */
1318	dma_dev->cap_mask = plat_data->cap_mask;
1319
1320	adev->pdev = pdev;
1321	platform_set_drvdata(pdev, adev);
1322
1323	INIT_LIST_HEAD(&dma_dev->channels);
1324
1325	/* set base routines */
1326	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1327	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1328	dma_dev->device_tx_status = iop_adma_status;
1329	dma_dev->device_issue_pending = iop_adma_issue_pending;
1330	dma_dev->dev = &pdev->dev;
1331
1332	/* set prep routines based on capability */
1333	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1334		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1335	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1336		dma_dev->max_xor = iop_adma_get_max_xor();
1337		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1338	}
1339	if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1340		dma_dev->device_prep_dma_xor_val =
1341			iop_adma_prep_dma_xor_val;
1342	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1343		dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1344		dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1345	}
1346	if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1347		dma_dev->device_prep_dma_pq_val =
1348			iop_adma_prep_dma_pq_val;
1349	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1350		dma_dev->device_prep_dma_interrupt =
1351			iop_adma_prep_dma_interrupt;
1352
1353	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1354	if (!iop_chan) {
1355		ret = -ENOMEM;
1356		goto err_free_dma;
1357	}
1358	iop_chan->device = adev;
1359
1360	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1361					resource_size(res));
1362	if (!iop_chan->mmr_base) {
1363		ret = -ENOMEM;
1364		goto err_free_iop_chan;
1365	}
1366	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1367		iop_chan);
1368
1369	/* clear errors before enabling interrupts */
1370	iop_adma_device_clear_err_status(iop_chan);
1371
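	/* the ADMA unit signals end-of-transfer, end-of-chain and error
	 * conditions on three separate interrupt lines
	 */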
1372	for (i = 0; i < 3; i++) {
1373		irq_handler_t handler[] = { iop_adma_eot_handler,
1374					iop_adma_eoc_handler,
1375					iop_adma_err_handler };
1376		int irq = platform_get_irq(pdev, i);
1377		if (irq < 0) {
1378			ret = -ENXIO;
1379			goto err_free_iop_chan;
1380		} else {
1381			ret = devm_request_irq(&pdev->dev, irq,
1382					handler[i], 0, pdev->name, iop_chan);
1383			if (ret)
1384				goto err_free_iop_chan;
1385		}
1386	}
1387
1388	spin_lock_init(&iop_chan->lock);
1389	INIT_LIST_HEAD(&iop_chan->chain);
1390	INIT_LIST_HEAD(&iop_chan->all_slots);
1391	iop_chan->common.device = dma_dev;
1392	dma_cookie_init(&iop_chan->common);
1393	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1394
1395	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1396		ret = iop_adma_memcpy_self_test(adev);
1397		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1398		if (ret)
1399			goto err_free_iop_chan;
1400	}
1401
1402	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1403		ret = iop_adma_xor_val_self_test(adev);
1404		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1405		if (ret)
1406			goto err_free_iop_chan;
1407	}
1408
1409	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1410	    dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1411		#ifdef CONFIG_RAID6_PQ
1412		ret = iop_adma_pq_zero_sum_self_test(adev);
1413		dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1414		#else
1415		/* cannot test raid6, so do not publish capability */
1416		dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1417		dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1418		ret = 0;
1419		#endif
1420		if (ret)
1421			goto err_free_iop_chan;
1422	}
1423
1424	dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
1425		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1426		 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1427		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1428		 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1429		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1430		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1431
1432	dma_async_device_register(dma_dev);
1433	goto out;
1434
1435 err_free_iop_chan:
1436	kfree(iop_chan);
1437 err_free_dma:
1438	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1439			adev->dma_desc_pool_virt, adev->dma_desc_pool);
1440 err_free_adev:
1441	kfree(adev);
1442 out:
1443	return ret;
1444}
1445
1446static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1447{
1448	struct iop_adma_desc_slot *sw_desc, *grp_start;
1449	dma_cookie_t cookie;
1450	int slot_cnt, slots_per_op;
1451
1452	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1453
1454	spin_lock_bh(&iop_chan->lock);
1455	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1456	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1457	if (sw_desc) {
1458		grp_start = sw_desc->group_head;
1459
1460		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1461		async_tx_ack(&sw_desc->async_tx);
1462		iop_desc_init_memcpy(grp_start, 0);
1463		iop_desc_set_byte_count(grp_start, iop_chan, 0);
1464		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1465		iop_desc_set_memcpy_src_addr(grp_start, 0);
1466
1467		cookie = dma_cookie_assign(&sw_desc->async_tx);
1468
1469		/* initialize the completed cookie to be less than
1470		 * the most recently used cookie
1471		 */
1472		iop_chan->common.completed_cookie = cookie - 1;
1473
1474		/* channel should not be busy */
1475		BUG_ON(iop_chan_is_busy(iop_chan));
1476
1477		/* clear any prior error-status bits */
1478		iop_adma_device_clear_err_status(iop_chan);
1479
1480		/* disable operation */
1481		iop_chan_disable(iop_chan);
1482
1483		/* set the descriptor address */
1484		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1485
1486		/* 1/ don't add pre-chained descriptors
1487		 * 2/ dummy read to flush next_desc write
1488		 */
1489		BUG_ON(iop_desc_get_next_desc(sw_desc));
1490
1491		/* run the descriptor */
1492		iop_chan_enable(iop_chan);
1493	} else
1494		dev_err(iop_chan->device->common.dev,
1495			"failed to allocate null descriptor\n");
1496	spin_unlock_bh(&iop_chan->lock);
1497}
1498
1499static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1500{
1501	struct iop_adma_desc_slot *sw_desc, *grp_start;
1502	dma_cookie_t cookie;
1503	int slot_cnt, slots_per_op;
1504
1505	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1506
1507	spin_lock_bh(&iop_chan->lock);
1508	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1509	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1510	if (sw_desc) {
1511		grp_start = sw_desc->group_head;
1512		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1513		async_tx_ack(&sw_desc->async_tx);
1514		iop_desc_init_null_xor(grp_start, 2, 0);
1515		iop_desc_set_byte_count(grp_start, iop_chan, 0);
1516		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1517		iop_desc_set_xor_src_addr(grp_start, 0, 0);
1518		iop_desc_set_xor_src_addr(grp_start, 1, 0);
1519
1520		cookie = dma_cookie_assign(&sw_desc->async_tx);
1521
1522		/* initialize the completed cookie to be less than
1523		 * the most recently used cookie
1524		 */
1525		iop_chan->common.completed_cookie = cookie - 1;
1526
1527		/* channel should not be busy */
1528		BUG_ON(iop_chan_is_busy(iop_chan));
1529
1530		/* clear any prior error-status bits */
1531		iop_adma_device_clear_err_status(iop_chan);
1532
1533		/* disable operation */
1534		iop_chan_disable(iop_chan);
1535
1536		/* set the descriptor address */
1537		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1538
1539		/* 1/ don't add pre-chained descriptors
1540		 * 2/ dummy read to flush next_desc write
1541		 */
1542		BUG_ON(iop_desc_get_next_desc(sw_desc));
1543
1544		/* run the descriptor */
1545		iop_chan_enable(iop_chan);
1546	} else
1547		dev_err(iop_chan->device->common.dev,
1548			"failed to allocate null descriptor\n");
1549	spin_unlock_bh(&iop_chan->lock);
1550}
1551
1552static struct platform_driver iop_adma_driver = {
1553	.probe		= iop_adma_probe,
1554	.remove		= iop_adma_remove,
1555	.driver		= {
1556		.name	= "iop-adma",
1557	},
1558};
1559
1560module_platform_driver(iop_adma_driver);
1561
1562MODULE_AUTHOR("Intel Corporation");
1563MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1564MODULE_LICENSE("GPL");
1565MODULE_ALIAS("platform:iop-adma");