v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * offload engine driver for the Marvell XOR engine
   4 * Copyright (C) 2007, 2008, Marvell International Ltd.
   5 */
   6
   7#include <linux/init.h>
   8#include <linux/slab.h>
   9#include <linux/delay.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/spinlock.h>
  12#include <linux/interrupt.h>
  13#include <linux/platform_device.h>
  14#include <linux/property.h>
  15#include <linux/memory.h>
  16#include <linux/clk.h>
  17#include <linux/of.h>
  18#include <linux/of_irq.h>
  19#include <linux/irqdomain.h>
  20#include <linux/cpumask.h>
  21#include <linux/platform_data/dma-mv_xor.h>
  22
  23#include "dmaengine.h"
  24#include "mv_xor.h"
  25
  26enum mv_xor_type {
  27	XOR_ORION,
  28	XOR_ARMADA_38X,
  29	XOR_ARMADA_37XX,
  30};
  31
  32enum mv_xor_mode {
  33	XOR_MODE_IN_REG,
  34	XOR_MODE_IN_DESC,
  35};
  36
  37static void mv_xor_issue_pending(struct dma_chan *chan);
  38
  39#define to_mv_xor_chan(chan)		\
  40	container_of(chan, struct mv_xor_chan, dmachan)
  41
  42#define to_mv_xor_slot(tx)		\
  43	container_of(tx, struct mv_xor_desc_slot, async_tx)
  44
  45#define mv_chan_to_devp(chan)           \
  46	((chan)->dmadev.dev)
  47
  48static void mv_desc_init(struct mv_xor_desc_slot *desc,
  49			 dma_addr_t addr, u32 byte_count,
  50			 enum dma_ctrl_flags flags)
  51{
  52	struct mv_xor_desc *hw_desc = desc->hw_desc;
  53
  54	hw_desc->status = XOR_DESC_DMA_OWNED;
  55	hw_desc->phy_next_desc = 0;
  56	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
  57	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
  58				XOR_DESC_EOD_INT_EN : 0;
  59	hw_desc->phy_dest_addr = addr;
  60	hw_desc->byte_count = byte_count;
  61}
  62
  63static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
  64{
  65	struct mv_xor_desc *hw_desc = desc->hw_desc;
  66
  67	switch (desc->type) {
  68	case DMA_XOR:
  69	case DMA_INTERRUPT:
  70		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
  71		break;
  72	case DMA_MEMCPY:
  73		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
  74		break;
  75	default:
  76		BUG();
  77		return;
  78	}
  79}
  80
  81static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
  82				  u32 next_desc_addr)
  83{
  84	struct mv_xor_desc *hw_desc = desc->hw_desc;
  85	BUG_ON(hw_desc->phy_next_desc);
  86	hw_desc->phy_next_desc = next_desc_addr;
  87}
  88
  89static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
  90				 int index, dma_addr_t addr)
  91{
  92	struct mv_xor_desc *hw_desc = desc->hw_desc;
  93	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
  94	if (desc->type == DMA_XOR)
  95		hw_desc->desc_command |= (1 << index);
  96}
  97
  98static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
  99{
 100	return readl_relaxed(XOR_CURR_DESC(chan));
 102
 103static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 104					u32 next_desc_addr)
 105{
 106	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
 107}
 108
 109static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 110{
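	/* the interrupt registers are shared by the engine's channels;
	 * each channel owns a 16-bit slice selected by chan->idx
	 */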
 111	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
 112	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
 113	writel_relaxed(val, XOR_INTR_MASK(chan));
 114}
 115
 116static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
 117{
 118	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
 119	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
 120	return intr_cause;
 121}
 122
 123static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
 124{
 125	u32 val;
 126
 127	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
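	/* the cause register clears on writing 0, so the inverted mask below
	 * acknowledges only the end-of-chain/descriptor/stopped bits
	 */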
 128	val = ~(val << (chan->idx * 16));
 129	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
 130	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 131}
 132
 133static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
 134{
 135	u32 val = 0xFFFF0000 >> (chan->idx * 16);
 136	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 137}
 138
 139static void mv_chan_set_mode(struct mv_xor_chan *chan,
 140			     u32 op_mode)
 141{
 142	u32 config = readl_relaxed(XOR_CONFIG(chan));
 143
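	/* the operation mode lives in the low three bits of the config register */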
 144	config &= ~0x7;
 145	config |= op_mode;
 146
 147#if defined(__BIG_ENDIAN)
 148	config |= XOR_DESCRIPTOR_SWAP;
 149#else
 150	config &= ~XOR_DESCRIPTOR_SWAP;
 151#endif
 152
 153	writel_relaxed(config, XOR_CONFIG(chan));
 154}
 155
 156static void mv_chan_activate(struct mv_xor_chan *chan)
 157{
 158	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
 159
 160	/* writel ensures all descriptors are flushed before activation */
 161	writel(BIT(0), XOR_ACTIVATION(chan));
 162}
 163
 164static char mv_chan_is_busy(struct mv_xor_chan *chan)
 165{
 166	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
 167
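	/* the channel state field is in bits [5:4]; only state 1 is treated as busy */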
 168	state = (state >> 4) & 0x3;
 169
 170	return (state == 1) ? 1 : 0;
 171}
 172
 173/*
  174 * mv_chan_start_new_chain - program the engine to operate on a new
  175 * chain headed by sw_desc.
 176 * Caller must hold &mv_chan->lock while calling this function
 177 */
 178static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
 179				    struct mv_xor_desc_slot *sw_desc)
 180{
 181	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
 182		__func__, __LINE__, sw_desc);
 183
 184	/* set the hardware chain */
 185	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
 186
 187	mv_chan->pending++;
 188	mv_xor_issue_pending(&mv_chan->dmachan);
 189}
 190
 191static dma_cookie_t
 192mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 193				struct mv_xor_chan *mv_chan,
 194				dma_cookie_t cookie)
 195{
 196	BUG_ON(desc->async_tx.cookie < 0);
 197
 198	if (desc->async_tx.cookie > 0) {
 199		cookie = desc->async_tx.cookie;
 200
 201		dma_descriptor_unmap(&desc->async_tx);
 202		/* call the callback (must not sleep or submit new
 203		 * operations to this channel)
 204		 */
 205		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
 206	}
 207
 208	/* run dependent operations */
 209	dma_run_dependencies(&desc->async_tx);
 210
 211	return cookie;
 212}
 213
 214static int
 215mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
 216{
 217	struct mv_xor_desc_slot *iter, *_iter;
 218
 219	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 220	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 221				 node) {
 222
 223		if (async_tx_test_ack(&iter->async_tx)) {
 224			list_move_tail(&iter->node, &mv_chan->free_slots);
 225			if (!list_empty(&iter->sg_tx_list)) {
 226				list_splice_tail_init(&iter->sg_tx_list,
 227							&mv_chan->free_slots);
 228			}
 229		}
 230	}
 231	return 0;
 232}
 233
 234static int
 235mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
 236		   struct mv_xor_chan *mv_chan)
 237{
 238	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
 239		__func__, __LINE__, desc, desc->async_tx.flags);
 240
 241	/* the client is allowed to attach dependent operations
 242	 * until 'ack' is set
 243	 */
 244	if (!async_tx_test_ack(&desc->async_tx)) {
 245		/* move this slot to the completed_slots */
 246		list_move_tail(&desc->node, &mv_chan->completed_slots);
 247		if (!list_empty(&desc->sg_tx_list)) {
 248			list_splice_tail_init(&desc->sg_tx_list,
 249					      &mv_chan->completed_slots);
 250		}
 251	} else {
 252		list_move_tail(&desc->node, &mv_chan->free_slots);
 253		if (!list_empty(&desc->sg_tx_list)) {
 254			list_splice_tail_init(&desc->sg_tx_list,
 255					      &mv_chan->free_slots);
 256		}
 257	}
 258
 259	return 0;
 260}
 261
 262/* This function must be called with the mv_xor_chan spinlock held */
 263static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
 264{
 265	struct mv_xor_desc_slot *iter, *_iter;
 266	dma_cookie_t cookie = 0;
 267	int busy = mv_chan_is_busy(mv_chan);
 268	u32 current_desc = mv_chan_get_current_desc(mv_chan);
 269	int current_cleaned = 0;
 270	struct mv_xor_desc *hw_desc;
 271
 272	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 273	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
 274	mv_chan_clean_completed_slots(mv_chan);
 275
 276	/* free completed slots from the chain starting with
 277	 * the oldest descriptor
 278	 */
 279
 280	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 281				 node) {
 282
 283		/* clean finished descriptors */
 284		hw_desc = iter->hw_desc;
 285		if (hw_desc->status & XOR_DESC_SUCCESS) {
 286			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
 287								 cookie);
 288
 289			/* done processing desc, clean slot */
 290			mv_desc_clean_slot(iter, mv_chan);
 291
  292			/* break if we cleaned the current descriptor */
 293			if (iter->async_tx.phys == current_desc) {
 294				current_cleaned = 1;
 295				break;
 296			}
 297		} else {
 298			if (iter->async_tx.phys == current_desc) {
 299				current_cleaned = 0;
 300				break;
 301			}
 302		}
 303	}
 304
 305	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
 306		if (current_cleaned) {
 307			/*
 308			 * current descriptor cleaned and removed, run
 309			 * from list head
 310			 */
 311			iter = list_entry(mv_chan->chain.next,
 312					  struct mv_xor_desc_slot,
 313					  node);
 314			mv_chan_start_new_chain(mv_chan, iter);
 315		} else {
 316			if (!list_is_last(&iter->node, &mv_chan->chain)) {
 317				/*
 318				 * descriptors are still waiting after
 319				 * current, trigger them
 320				 */
 321				iter = list_entry(iter->node.next,
 322						  struct mv_xor_desc_slot,
 323						  node);
 324				mv_chan_start_new_chain(mv_chan, iter);
 325			} else {
 326				/*
 327				 * some descriptors are still waiting
 328				 * to be cleaned
 329				 */
 330				tasklet_schedule(&mv_chan->irq_tasklet);
 331			}
 332		}
 333	}
 334
 335	if (cookie > 0)
 336		mv_chan->dmachan.completed_cookie = cookie;
 337}
 338
 339static void mv_xor_tasklet(struct tasklet_struct *t)
 340{
 341	struct mv_xor_chan *chan = from_tasklet(chan, t, irq_tasklet);
 342
 343	spin_lock(&chan->lock);
 344	mv_chan_slot_cleanup(chan);
 345	spin_unlock(&chan->lock);
 346}
 347
 348static struct mv_xor_desc_slot *
 349mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
 350{
 351	struct mv_xor_desc_slot *iter;
 352
 353	spin_lock_bh(&mv_chan->lock);
 354
 355	if (!list_empty(&mv_chan->free_slots)) {
 356		iter = list_first_entry(&mv_chan->free_slots,
 357					struct mv_xor_desc_slot,
 358					node);
 359
 360		list_move_tail(&iter->node, &mv_chan->allocated_slots);
 361
 362		spin_unlock_bh(&mv_chan->lock);
 363
 364		/* pre-ack descriptor */
 365		async_tx_ack(&iter->async_tx);
 366		iter->async_tx.cookie = -EBUSY;
 367
 368		return iter;
 369
 370	}
 371
 372	spin_unlock_bh(&mv_chan->lock);
 373
 374	/* try to free some slots if the allocation fails */
 375	tasklet_schedule(&mv_chan->irq_tasklet);
 376
 377	return NULL;
 378}
 379
 380/************************ DMA engine API functions ****************************/
 381static dma_cookie_t
 382mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 383{
 384	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
 385	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
 386	struct mv_xor_desc_slot *old_chain_tail;
 387	dma_cookie_t cookie;
 388	int new_hw_chain = 1;
 389
 390	dev_dbg(mv_chan_to_devp(mv_chan),
 391		"%s sw_desc %p: async_tx %p\n",
 392		__func__, sw_desc, &sw_desc->async_tx);
 393
 394	spin_lock_bh(&mv_chan->lock);
 395	cookie = dma_cookie_assign(tx);
 396
 397	if (list_empty(&mv_chan->chain))
 398		list_move_tail(&sw_desc->node, &mv_chan->chain);
 399	else {
 400		new_hw_chain = 0;
 401
 402		old_chain_tail = list_entry(mv_chan->chain.prev,
 403					    struct mv_xor_desc_slot,
 404					    node);
 405		list_move_tail(&sw_desc->node, &mv_chan->chain);
 406
 407		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
 408			&old_chain_tail->async_tx.phys);
 409
 410		/* fix up the hardware chain */
 411		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
 412
 413		/* if the channel is not busy */
 414		if (!mv_chan_is_busy(mv_chan)) {
 415			u32 current_desc = mv_chan_get_current_desc(mv_chan);
 416			/*
  417			 * and the current desc is the end of the chain before
 418			 * the append, then we need to start the channel
 419			 */
 420			if (current_desc == old_chain_tail->async_tx.phys)
 421				new_hw_chain = 1;
 422		}
 423	}
 424
 425	if (new_hw_chain)
 426		mv_chan_start_new_chain(mv_chan, sw_desc);
 427
 428	spin_unlock_bh(&mv_chan->lock);
 429
 430	return cookie;
 431}
 432
 433/* returns the number of allocated descriptors */
 434static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 435{
 436	void *virt_desc;
 437	dma_addr_t dma_desc;
 438	int idx;
 439	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 440	struct mv_xor_desc_slot *slot = NULL;
 441	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
 442
 443	/* Allocate descriptor slots */
 444	idx = mv_chan->slots_allocated;
 445	while (idx < num_descs_in_pool) {
 446		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 447		if (!slot) {
 448			dev_info(mv_chan_to_devp(mv_chan),
 449				 "channel only initialized %d descriptor slots",
 450				 idx);
 451			break;
 452		}
 453		virt_desc = mv_chan->dma_desc_pool_virt;
 454		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
 455
 456		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 457		slot->async_tx.tx_submit = mv_xor_tx_submit;
 458		INIT_LIST_HEAD(&slot->node);
 459		INIT_LIST_HEAD(&slot->sg_tx_list);
 460		dma_desc = mv_chan->dma_desc_pool;
 461		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
 462		slot->idx = idx++;
 463
 464		spin_lock_bh(&mv_chan->lock);
 465		mv_chan->slots_allocated = idx;
 466		list_add_tail(&slot->node, &mv_chan->free_slots);
 467		spin_unlock_bh(&mv_chan->lock);
 468	}
 469
 470	dev_dbg(mv_chan_to_devp(mv_chan),
 471		"allocated %d descriptor slots\n",
 472		mv_chan->slots_allocated);
 473
 474	return mv_chan->slots_allocated ? : -ENOMEM;
 475}
 476
 477/*
  478 * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
  479 * a new MBus window if necessary. Use a cache for these checks so that
  480 * the MMIO-mapped registers don't have to be accessed every time, which
  481 * speeds up this process.
 482 */
 483static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
 484{
 485	struct mv_xor_device *xordev = mv_chan->xordev;
 486	void __iomem *base = mv_chan->mmr_high_base;
 487	u32 win_enable;
 488	u32 size;
 489	u8 target, attr;
 490	int ret;
 491	int i;
 492
 493	/* Nothing needs to get done for the Armada 3700 */
 494	if (xordev->xor_type == XOR_ARMADA_37XX)
 495		return 0;
 496
 497	/*
  498	 * Loop over the cached windows to check if the requested area
  499	 * is already mapped. If this is the case, nothing needs to be done
 500	 * and we can return.
 501	 */
 502	for (i = 0; i < WINDOW_COUNT; i++) {
 503		if (addr >= xordev->win_start[i] &&
 504		    addr <= xordev->win_end[i]) {
 505			/* Window is already mapped */
 506			return 0;
 507		}
 508	}
 509
 510	/*
 511	 * The window is not mapped, so we need to create the new mapping
 512	 */
 513
  514	/* If no IO window is found, the addr has to be located in SDRAM */
 515	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
 516	if (ret < 0)
 517		return 0;
 518
 519	/*
 520	 * Mask the base addr 'addr' according to 'size' read back from the
 521	 * MBus window. Otherwise we might end up with an address located
  522	 * somewhere in the middle of this area.
 523	 */
 524	size -= 1;
 525	addr &= ~size;
 526
 527	/*
  528	 * Reading either of the two enable registers is enough, as they are
  529	 * always programmed with identical values.
 530	 */
 531	win_enable = readl(base + WINDOW_BAR_ENABLE(0));
 532
 533	/* Set 'i' to the first free window to write the new values to */
 534	i = ffs(~win_enable) - 1;
 535	if (i >= WINDOW_COUNT)
 536		return -ENOMEM;
 537
 538	writel((addr & 0xffff0000) | (attr << 8) | target,
 539	       base + WINDOW_BASE(i));
 540	writel(size & 0xffff0000, base + WINDOW_SIZE(i));
 541
 542	/* Fill the caching variables for later use */
 543	xordev->win_start[i] = addr;
 544	xordev->win_end[i] = addr + size;
 545
 546	win_enable |= (1 << i);
 547	win_enable |= 3 << (16 + (2 * i));
 548	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
 549	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
 550
 551	return 0;
 552}
 553
 554static struct dma_async_tx_descriptor *
 555mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 556		    unsigned int src_cnt, size_t len, unsigned long flags)
 557{
 558	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 559	struct mv_xor_desc_slot *sw_desc;
 560	int ret;
 561
 562	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 563		return NULL;
 564
 565	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 566
 567	dev_dbg(mv_chan_to_devp(mv_chan),
 568		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
 569		__func__, src_cnt, len, &dest, flags);
 570
 571	/* Check if a new window needs to get added for 'dest' */
 572	ret = mv_xor_add_io_win(mv_chan, dest);
 573	if (ret)
 574		return NULL;
 575
 576	sw_desc = mv_chan_alloc_slot(mv_chan);
 577	if (sw_desc) {
 578		sw_desc->type = DMA_XOR;
 579		sw_desc->async_tx.flags = flags;
 580		mv_desc_init(sw_desc, dest, len, flags);
 581		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
 582			mv_desc_set_mode(sw_desc);
 583		while (src_cnt--) {
 584			/* Check if a new window needs to get added for 'src' */
 585			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
 586			if (ret)
 587				return NULL;
 588			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
 589		}
 590	}
 591
 592	dev_dbg(mv_chan_to_devp(mv_chan),
 593		"%s sw_desc %p async_tx %p \n",
 594		__func__, sw_desc, &sw_desc->async_tx);
 595	return sw_desc ? &sw_desc->async_tx : NULL;
 596}
 597
 598static struct dma_async_tx_descriptor *
 599mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 600		size_t len, unsigned long flags)
 601{
 602	/*
 603	 * A MEMCPY operation is identical to an XOR operation with only
 604	 * a single source address.
 605	 */
 606	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 607}
 608
 609static struct dma_async_tx_descriptor *
 610mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 611{
 612	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 613	dma_addr_t src, dest;
 614	size_t len;
 615
 616	src = mv_chan->dummy_src_addr;
 617	dest = mv_chan->dummy_dst_addr;
 618	len = MV_XOR_MIN_BYTE_COUNT;
 619
 620	/*
 621	 * We implement the DMA_INTERRUPT operation as a minimum sized
 622	 * XOR operation with a single dummy source address.
 623	 */
 624	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 625}
 626
 627static void mv_xor_free_chan_resources(struct dma_chan *chan)
 628{
 629	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 630	struct mv_xor_desc_slot *iter, *_iter;
 631	int in_use_descs = 0;
 632
 633	spin_lock_bh(&mv_chan->lock);
 634
 635	mv_chan_slot_cleanup(mv_chan);
 636
 637	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 638					node) {
 639		in_use_descs++;
 640		list_move_tail(&iter->node, &mv_chan->free_slots);
 641	}
 642	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 643				 node) {
 644		in_use_descs++;
 645		list_move_tail(&iter->node, &mv_chan->free_slots);
 646	}
 647	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
 648				 node) {
 649		in_use_descs++;
 650		list_move_tail(&iter->node, &mv_chan->free_slots);
 651	}
 652	list_for_each_entry_safe_reverse(
 653		iter, _iter, &mv_chan->free_slots, node) {
 654		list_del(&iter->node);
 655		kfree(iter);
 656		mv_chan->slots_allocated--;
 657	}
 658
 659	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
 660		__func__, mv_chan->slots_allocated);
 661	spin_unlock_bh(&mv_chan->lock);
 662
 663	if (in_use_descs)
 664		dev_err(mv_chan_to_devp(mv_chan),
 665			"freeing %d in use descriptors!\n", in_use_descs);
 666}
 667
 668/**
 669 * mv_xor_status - poll the status of an XOR transaction
 670 * @chan: XOR channel handle
 671 * @cookie: XOR transaction identifier
 672 * @txstate: XOR transactions state holder (or NULL)
 673 */
 674static enum dma_status mv_xor_status(struct dma_chan *chan,
 675					  dma_cookie_t cookie,
 676					  struct dma_tx_state *txstate)
 677{
 678	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 679	enum dma_status ret;
 680
 681	ret = dma_cookie_status(chan, cookie, txstate);
 682	if (ret == DMA_COMPLETE)
 683		return ret;
 684
 685	spin_lock_bh(&mv_chan->lock);
 686	mv_chan_slot_cleanup(mv_chan);
 687	spin_unlock_bh(&mv_chan->lock);
 688
 689	return dma_cookie_status(chan, cookie, txstate);
 690}
 691
 692static void mv_chan_dump_regs(struct mv_xor_chan *chan)
 693{
 694	u32 val;
 695
 696	val = readl_relaxed(XOR_CONFIG(chan));
 697	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
 698
 699	val = readl_relaxed(XOR_ACTIVATION(chan));
 700	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
 701
 702	val = readl_relaxed(XOR_INTR_CAUSE(chan));
 703	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
 704
 705	val = readl_relaxed(XOR_INTR_MASK(chan));
 706	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
 707
 708	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
 709	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
 710
 711	val = readl_relaxed(XOR_ERROR_ADDR(chan));
 712	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
 713}
 714
 715static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
 716					  u32 intr_cause)
 717{
 718	if (intr_cause & XOR_INT_ERR_DECODE) {
 719		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
 720		return;
 721	}
 722
 723	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
 724		chan->idx, intr_cause);
 725
 726	mv_chan_dump_regs(chan);
 727	WARN_ON(1);
 728}
 729
 730static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
 731{
 732	struct mv_xor_chan *chan = data;
 733	u32 intr_cause = mv_chan_get_intr_cause(chan);
 734
 735	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
 736
 737	if (intr_cause & XOR_INTR_ERRORS)
 738		mv_chan_err_interrupt_handler(chan, intr_cause);
 739
 740	tasklet_schedule(&chan->irq_tasklet);
 741
 742	mv_chan_clear_eoc_cause(chan);
 743
 744	return IRQ_HANDLED;
 745}
 746
 747static void mv_xor_issue_pending(struct dma_chan *chan)
 748{
 749	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 750
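	/* batch submissions: the engine is only (re)activated once the number
	 * of pending descriptors reaches MV_XOR_THRESHOLD
	 */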
 751	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
 752		mv_chan->pending = 0;
 753		mv_chan_activate(mv_chan);
 754	}
 755}
 756
 757/*
 758 * Perform a transaction to verify the HW works.
 759 */
 760
 761static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
 762{
 763	int i, ret;
 764	void *src, *dest;
 765	dma_addr_t src_dma, dest_dma;
 766	struct dma_chan *dma_chan;
 767	dma_cookie_t cookie;
 768	struct dma_async_tx_descriptor *tx;
 769	struct dmaengine_unmap_data *unmap;
 770	int err = 0;
 771
 772	src = kmalloc(PAGE_SIZE, GFP_KERNEL);
 773	if (!src)
 774		return -ENOMEM;
 775
 776	dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
 777	if (!dest) {
 778		kfree(src);
 779		return -ENOMEM;
 780	}
 781
 782	/* Fill in src buffer */
 783	for (i = 0; i < PAGE_SIZE; i++)
 784		((u8 *) src)[i] = (u8)i;
 785
 786	dma_chan = &mv_chan->dmachan;
 787	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 788		err = -ENODEV;
 789		goto out;
 790	}
 791
 792	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
 793	if (!unmap) {
 794		err = -ENOMEM;
 795		goto free_resources;
 796	}
 797
 798	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
 799			       offset_in_page(src), PAGE_SIZE,
 800			       DMA_TO_DEVICE);
 801	unmap->addr[0] = src_dma;
 802
 803	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
 804	if (ret) {
 805		err = -ENOMEM;
 806		goto free_resources;
 807	}
 808	unmap->to_cnt = 1;
 809
 810	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
 811				offset_in_page(dest), PAGE_SIZE,
 812				DMA_FROM_DEVICE);
 813	unmap->addr[1] = dest_dma;
 814
 815	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
 816	if (ret) {
 817		err = -ENOMEM;
 818		goto free_resources;
 819	}
 820	unmap->from_cnt = 1;
 821	unmap->len = PAGE_SIZE;
 822
 823	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
 824				    PAGE_SIZE, 0);
 825	if (!tx) {
 826		dev_err(dma_chan->device->dev,
 827			"Self-test cannot prepare operation, disabling\n");
 828		err = -ENODEV;
 829		goto free_resources;
 830	}
 831
 832	cookie = mv_xor_tx_submit(tx);
 833	if (dma_submit_error(cookie)) {
 834		dev_err(dma_chan->device->dev,
 835			"Self-test submit error, disabling\n");
 836		err = -ENODEV;
 837		goto free_resources;
 838	}
 839
 840	mv_xor_issue_pending(dma_chan);
 841	async_tx_ack(tx);
 842	msleep(1);
 843
 844	if (mv_xor_status(dma_chan, cookie, NULL) !=
 845	    DMA_COMPLETE) {
 846		dev_err(dma_chan->device->dev,
 847			"Self-test copy timed out, disabling\n");
 848		err = -ENODEV;
 849		goto free_resources;
 850	}
 851
 852	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 853				PAGE_SIZE, DMA_FROM_DEVICE);
 854	if (memcmp(src, dest, PAGE_SIZE)) {
 855		dev_err(dma_chan->device->dev,
 856			"Self-test copy failed compare, disabling\n");
 857		err = -ENODEV;
 858		goto free_resources;
 859	}
 860
 861free_resources:
 862	dmaengine_unmap_put(unmap);
 863	mv_xor_free_chan_resources(dma_chan);
 864out:
 865	kfree(src);
 866	kfree(dest);
 867	return err;
 868}
 869
 870#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
 871static int
 872mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
 873{
 874	int i, src_idx, ret;
 875	struct page *dest;
 876	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
 877	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 878	dma_addr_t dest_dma;
 879	struct dma_async_tx_descriptor *tx;
 880	struct dmaengine_unmap_data *unmap;
 881	struct dma_chan *dma_chan;
 882	dma_cookie_t cookie;
 883	u8 cmp_byte = 0;
 884	u32 cmp_word;
 885	int err = 0;
 886	int src_count = MV_XOR_NUM_SRC_TEST;
 887
 888	for (src_idx = 0; src_idx < src_count; src_idx++) {
 889		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 890		if (!xor_srcs[src_idx]) {
 891			while (src_idx--)
 892				__free_page(xor_srcs[src_idx]);
 893			return -ENOMEM;
 894		}
 895	}
 896
 897	dest = alloc_page(GFP_KERNEL);
 898	if (!dest) {
 899		while (src_idx--)
 900			__free_page(xor_srcs[src_idx]);
 901		return -ENOMEM;
 902	}
 903
 904	/* Fill in src buffers */
 905	for (src_idx = 0; src_idx < src_count; src_idx++) {
 906		u8 *ptr = page_address(xor_srcs[src_idx]);
 907		for (i = 0; i < PAGE_SIZE; i++)
 908			ptr[i] = (1 << src_idx);
 909	}
 910
 911	for (src_idx = 0; src_idx < src_count; src_idx++)
 912		cmp_byte ^= (u8) (1 << src_idx);
 913
 914	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
 915		(cmp_byte << 8) | cmp_byte;
 916
 917	memset(page_address(dest), 0, PAGE_SIZE);
 918
 919	dma_chan = &mv_chan->dmachan;
 920	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 921		err = -ENODEV;
 922		goto out;
 923	}
 924
 925	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
 926					 GFP_KERNEL);
 927	if (!unmap) {
 928		err = -ENOMEM;
 929		goto free_resources;
 930	}
 931
 932	/* test xor */
 933	for (i = 0; i < src_count; i++) {
 934		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 935					      0, PAGE_SIZE, DMA_TO_DEVICE);
 936		dma_srcs[i] = unmap->addr[i];
 937		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
 938		if (ret) {
 939			err = -ENOMEM;
 940			goto free_resources;
 941		}
 942		unmap->to_cnt++;
 943	}
 944
 945	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
 946				      DMA_FROM_DEVICE);
 947	dest_dma = unmap->addr[src_count];
 948	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
 949	if (ret) {
 950		err = -ENOMEM;
 951		goto free_resources;
 952	}
 953	unmap->from_cnt = 1;
 954	unmap->len = PAGE_SIZE;
 955
 956	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 957				 src_count, PAGE_SIZE, 0);
 958	if (!tx) {
 959		dev_err(dma_chan->device->dev,
 960			"Self-test cannot prepare operation, disabling\n");
 961		err = -ENODEV;
 962		goto free_resources;
 963	}
 964
 965	cookie = mv_xor_tx_submit(tx);
 966	if (dma_submit_error(cookie)) {
 967		dev_err(dma_chan->device->dev,
 968			"Self-test submit error, disabling\n");
 969		err = -ENODEV;
 970		goto free_resources;
 971	}
 972
 973	mv_xor_issue_pending(dma_chan);
 974	async_tx_ack(tx);
 975	msleep(8);
 976
 977	if (mv_xor_status(dma_chan, cookie, NULL) !=
 978	    DMA_COMPLETE) {
 979		dev_err(dma_chan->device->dev,
 980			"Self-test xor timed out, disabling\n");
 981		err = -ENODEV;
 982		goto free_resources;
 983	}
 984
 985	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 986				PAGE_SIZE, DMA_FROM_DEVICE);
 987	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 988		u32 *ptr = page_address(dest);
 989		if (ptr[i] != cmp_word) {
 990			dev_err(dma_chan->device->dev,
 991				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
 992				i, ptr[i], cmp_word);
 993			err = -ENODEV;
 994			goto free_resources;
 995		}
 996	}
 997
 998free_resources:
 999	dmaengine_unmap_put(unmap);
1000	mv_xor_free_chan_resources(dma_chan);
1001out:
1002	src_idx = src_count;
1003	while (src_idx--)
1004		__free_page(xor_srcs[src_idx]);
1005	__free_page(dest);
1006	return err;
1007}
1008
1009static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1010{
1011	struct dma_chan *chan, *_chan;
1012	struct device *dev = mv_chan->dmadev.dev;
1013
1014	dma_async_device_unregister(&mv_chan->dmadev);
1015
1016	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
1017			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1018	dma_unmap_single(dev, mv_chan->dummy_src_addr,
1019			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
1020	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
1021			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
1022
1023	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1024				 device_node) {
1025		list_del(&chan->device_node);
1026	}
1027
1028	free_irq(mv_chan->irq, mv_chan);
1029
1030	return 0;
1031}
1032
1033static struct mv_xor_chan *
1034mv_xor_channel_add(struct mv_xor_device *xordev,
1035		   struct platform_device *pdev,
1036		   int idx, dma_cap_mask_t cap_mask, int irq)
1037{
1038	int ret = 0;
1039	struct mv_xor_chan *mv_chan;
1040	struct dma_device *dma_dev;
1041
1042	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1043	if (!mv_chan)
1044		return ERR_PTR(-ENOMEM);
1045
1046	mv_chan->idx = idx;
1047	mv_chan->irq = irq;
1048	if (xordev->xor_type == XOR_ORION)
1049		mv_chan->op_in_desc = XOR_MODE_IN_REG;
1050	else
1051		mv_chan->op_in_desc = XOR_MODE_IN_DESC;
1052
1053	dma_dev = &mv_chan->dmadev;
1054	dma_dev->dev = &pdev->dev;
1055	mv_chan->xordev = xordev;
1056
1057	/*
1058	 * These source and destination dummy buffers are used to implement
1059	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
1060	 * Hence, we only need to map the buffers at initialization-time.
1061	 */
1062	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
1063		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
1064	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
1065		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
1066
1067	/* allocate coherent memory for hardware descriptors
1068	 * note: writecombine gives slightly better performance, but
1069	 * requires that we explicitly flush the writes
1070	 */
1071	mv_chan->dma_desc_pool_virt =
1072	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
1073		       GFP_KERNEL);
1074	if (!mv_chan->dma_desc_pool_virt)
1075		return ERR_PTR(-ENOMEM);
1076
 1077	/* discover transaction capabilities from the platform data */
1078	dma_dev->cap_mask = cap_mask;
1079
1080	INIT_LIST_HEAD(&dma_dev->channels);
1081
1082	/* set base routines */
1083	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1084	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1085	dma_dev->device_tx_status = mv_xor_status;
1086	dma_dev->device_issue_pending = mv_xor_issue_pending;
1087
1088	/* set prep routines based on capability */
1089	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1090		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1091	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1092		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1093	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
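		/* the hardware descriptor has eight source address slots */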
1094		dma_dev->max_xor = 8;
1095		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1096	}
1097
1098	mv_chan->mmr_base = xordev->xor_base;
1099	mv_chan->mmr_high_base = xordev->xor_high_base;
1100	tasklet_setup(&mv_chan->irq_tasklet, mv_xor_tasklet);
1101
1102	/* clear errors before enabling interrupts */
1103	mv_chan_clear_err_status(mv_chan);
1104
1105	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1106			  0, dev_name(&pdev->dev), mv_chan);
1107	if (ret)
1108		goto err_free_dma;
1109
1110	mv_chan_unmask_interrupts(mv_chan);
1111
1112	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
1113		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
1114	else
1115		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
1116
1117	spin_lock_init(&mv_chan->lock);
1118	INIT_LIST_HEAD(&mv_chan->chain);
1119	INIT_LIST_HEAD(&mv_chan->completed_slots);
1120	INIT_LIST_HEAD(&mv_chan->free_slots);
1121	INIT_LIST_HEAD(&mv_chan->allocated_slots);
1122	mv_chan->dmachan.device = dma_dev;
1123	dma_cookie_init(&mv_chan->dmachan);
1124
1125	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1126
1127	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1128		ret = mv_chan_memcpy_self_test(mv_chan);
1129		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1130		if (ret)
1131			goto err_free_irq;
1132	}
1133
1134	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1135		ret = mv_chan_xor_self_test(mv_chan);
1136		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1137		if (ret)
1138			goto err_free_irq;
1139	}
1140
1141	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
1142		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1143		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1144		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1145		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1146
1147	ret = dma_async_device_register(dma_dev);
1148	if (ret)
1149		goto err_free_irq;
1150
1151	return mv_chan;
1152
1153err_free_irq:
1154	free_irq(mv_chan->irq, mv_chan);
1155err_free_dma:
1156	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1157			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1158	return ERR_PTR(ret);
1159}
1160
1161static void
1162mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1163			 const struct mbus_dram_target_info *dram)
1164{
1165	void __iomem *base = xordev->xor_high_base;
1166	u32 win_enable = 0;
1167	int i;
1168
1169	for (i = 0; i < 8; i++) {
1170		writel(0, base + WINDOW_BASE(i));
1171		writel(0, base + WINDOW_SIZE(i));
1172		if (i < 4)
1173			writel(0, base + WINDOW_REMAP_HIGH(i));
1174	}
1175
1176	for (i = 0; i < dram->num_cs; i++) {
1177		const struct mbus_dram_window *cs = dram->cs + i;
1178
1179		writel((cs->base & 0xffff0000) |
1180		       (cs->mbus_attr << 8) |
1181		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1182		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1183
1184		/* Fill the caching variables for later use */
1185		xordev->win_start[i] = cs->base;
1186		xordev->win_end[i] = cs->base + cs->size - 1;
1187
1188		win_enable |= (1 << i);
1189		win_enable |= 3 << (16 + (2 * i));
1190	}
1191
1192	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1193	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1194	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1195	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1196}
1197
1198static void
1199mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
1200{
1201	void __iomem *base = xordev->xor_high_base;
1202	u32 win_enable = 0;
1203	int i;
1204
1205	for (i = 0; i < 8; i++) {
1206		writel(0, base + WINDOW_BASE(i));
1207		writel(0, base + WINDOW_SIZE(i));
1208		if (i < 4)
1209			writel(0, base + WINDOW_REMAP_HIGH(i));
1210	}
1211	/*
 1212	 * For the Armada 3700, open a default 4GB MBus window. The
 1213	 * DRAM-related configuration is done at the AXIS level.
1214	 */
1215	writel(0xffff0000, base + WINDOW_SIZE(0));
1216	win_enable |= 1;
1217	win_enable |= 3 << 16;
1218
1219	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1220	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1221	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1222	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1223}
1224
1225/*
1226 * Since this XOR driver is basically used only for RAID5, we don't
1227 * need to care about synchronizing ->suspend with DMA activity,
1228 * because the DMA engine will naturally be quiet due to the block
1229 * devices being suspended.
1230 */
1231static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
1232{
1233	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1234	int i;
1235
1236	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1237		struct mv_xor_chan *mv_chan = xordev->channels[i];
1238
1239		if (!mv_chan)
1240			continue;
1241
1242		mv_chan->saved_config_reg =
1243			readl_relaxed(XOR_CONFIG(mv_chan));
1244		mv_chan->saved_int_mask_reg =
1245			readl_relaxed(XOR_INTR_MASK(mv_chan));
1246	}
1247
1248	return 0;
1249}
1250
1251static int mv_xor_resume(struct platform_device *dev)
1252{
1253	struct mv_xor_device *xordev = platform_get_drvdata(dev);
1254	const struct mbus_dram_target_info *dram;
1255	int i;
1256
1257	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1258		struct mv_xor_chan *mv_chan = xordev->channels[i];
1259
1260		if (!mv_chan)
1261			continue;
1262
1263		writel_relaxed(mv_chan->saved_config_reg,
1264			       XOR_CONFIG(mv_chan));
1265		writel_relaxed(mv_chan->saved_int_mask_reg,
1266			       XOR_INTR_MASK(mv_chan));
1267	}
1268
1269	if (xordev->xor_type == XOR_ARMADA_37XX) {
1270		mv_xor_conf_mbus_windows_a3700(xordev);
1271		return 0;
1272	}
1273
1274	dram = mv_mbus_dram_info();
1275	if (dram)
1276		mv_xor_conf_mbus_windows(xordev, dram);
1277
1278	return 0;
1279}
1280
1281static const struct of_device_id mv_xor_dt_ids[] = {
1282	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
1283	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
1284	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
1285	{},
1286};
1287
1288static unsigned int mv_xor_engine_count;
1289
1290static int mv_xor_probe(struct platform_device *pdev)
1291{
1292	const struct mbus_dram_target_info *dram;
1293	struct mv_xor_device *xordev;
1294	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1295	struct resource *res;
1296	unsigned int max_engines, max_channels;
1297	int i, ret;
1298
1299	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1300
1301	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1302	if (!xordev)
1303		return -ENOMEM;
1304
1305	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1306	if (!res)
1307		return -ENODEV;
1308
1309	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1310					resource_size(res));
1311	if (!xordev->xor_base)
1312		return -EBUSY;
1313
1314	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1315	if (!res)
1316		return -ENODEV;
1317
1318	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1319					     resource_size(res));
1320	if (!xordev->xor_high_base)
1321		return -EBUSY;
1322
1323	platform_set_drvdata(pdev, xordev);
1324
1325
1326	/*
 1327	 * We need to know which type of XOR device we use before
 1328	 * setting it up. In the non-DT case it can only be the legacy one.
1329	 */
1330	xordev->xor_type = XOR_ORION;
1331	if (pdev->dev.of_node)
1332		xordev->xor_type = (uintptr_t)device_get_match_data(&pdev->dev);
1333
1334	/*
1335	 * (Re-)program MBUS remapping windows if we are asked to.
1336	 */
1337	if (xordev->xor_type == XOR_ARMADA_37XX) {
1338		mv_xor_conf_mbus_windows_a3700(xordev);
1339	} else {
1340		dram = mv_mbus_dram_info();
1341		if (dram)
1342			mv_xor_conf_mbus_windows(xordev, dram);
1343	}
1344
1345	/* Not all platforms can gate the clock, so it is not
 1346	 * an error if the clock does not exist.
1347	 */
1348	xordev->clk = clk_get(&pdev->dev, NULL);
1349	if (!IS_ERR(xordev->clk))
1350		clk_prepare_enable(xordev->clk);
1351
1352	/*
1353	 * We don't want to have more than one channel per CPU in
1354	 * order for async_tx to perform well. So we limit the number
1355	 * of engines and channels so that we take into account this
1356	 * constraint. Note that we also want to use channels from
 1357	 * separate engines when possible.  For the dual-CPU Armada 3700
 1358	 * SoC with a single XOR engine, allow using both of its channels.
1359	 */
1360	max_engines = num_present_cpus();
1361	if (xordev->xor_type == XOR_ARMADA_37XX)
1362		max_channels =	num_present_cpus();
1363	else
1364		max_channels = min_t(unsigned int,
1365				     MV_XOR_MAX_CHANNELS,
1366				     DIV_ROUND_UP(num_present_cpus(), 2));
1367
1368	if (mv_xor_engine_count >= max_engines)
1369		return 0;
1370
1371	if (pdev->dev.of_node) {
1372		struct device_node *np;
1373		int i = 0;
1374
1375		for_each_child_of_node(pdev->dev.of_node, np) {
1376			struct mv_xor_chan *chan;
1377			dma_cap_mask_t cap_mask;
1378			int irq;
1379
1380			if (i >= max_channels)
1381				continue;
1382
1383			dma_cap_zero(cap_mask);
1384			dma_cap_set(DMA_MEMCPY, cap_mask);
1385			dma_cap_set(DMA_XOR, cap_mask);
1386			dma_cap_set(DMA_INTERRUPT, cap_mask);
1387
1388			irq = irq_of_parse_and_map(np, 0);
1389			if (!irq) {
1390				ret = -ENODEV;
1391				goto err_channel_add;
1392			}
1393
1394			chan = mv_xor_channel_add(xordev, pdev, i,
1395						  cap_mask, irq);
1396			if (IS_ERR(chan)) {
1397				ret = PTR_ERR(chan);
1398				irq_dispose_mapping(irq);
1399				goto err_channel_add;
1400			}
1401
1402			xordev->channels[i] = chan;
1403			i++;
1404		}
1405	} else if (pdata && pdata->channels) {
1406		for (i = 0; i < max_channels; i++) {
1407			struct mv_xor_channel_data *cd;
1408			struct mv_xor_chan *chan;
1409			int irq;
1410
1411			cd = &pdata->channels[i];
1412			irq = platform_get_irq(pdev, i);
1413			if (irq < 0) {
1414				ret = irq;
1415				goto err_channel_add;
1416			}
1417
1418			chan = mv_xor_channel_add(xordev, pdev, i,
1419						  cd->cap_mask, irq);
1420			if (IS_ERR(chan)) {
1421				ret = PTR_ERR(chan);
1422				goto err_channel_add;
1423			}
1424
1425			xordev->channels[i] = chan;
1426		}
1427	}
1428
1429	return 0;
1430
1431err_channel_add:
1432	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1433		if (xordev->channels[i]) {
1434			mv_xor_channel_remove(xordev->channels[i]);
1435			if (pdev->dev.of_node)
1436				irq_dispose_mapping(xordev->channels[i]->irq);
1437		}
1438
1439	if (!IS_ERR(xordev->clk)) {
1440		clk_disable_unprepare(xordev->clk);
1441		clk_put(xordev->clk);
1442	}
1443
1444	return ret;
1445}
1446
1447static struct platform_driver mv_xor_driver = {
1448	.probe		= mv_xor_probe,
1449	.suspend        = mv_xor_suspend,
1450	.resume         = mv_xor_resume,
1451	.driver		= {
1452		.name	        = MV_XOR_NAME,
1453		.of_match_table = mv_xor_dt_ids,
1454	},
1455};
1456
1457builtin_platform_driver(mv_xor_driver);
1458
1459/*
1460MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1461MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1462MODULE_LICENSE("GPL");
1463*/
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * offload engine driver for the Marvell XOR engine
   4 * Copyright (C) 2007, 2008, Marvell International Ltd.
   5 */
   6
   7#include <linux/init.h>
   8#include <linux/slab.h>
   9#include <linux/delay.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/spinlock.h>
  12#include <linux/interrupt.h>
  13#include <linux/of_device.h>
  14#include <linux/platform_device.h>
 
  15#include <linux/memory.h>
  16#include <linux/clk.h>
  17#include <linux/of.h>
  18#include <linux/of_irq.h>
  19#include <linux/irqdomain.h>
  20#include <linux/cpumask.h>
  21#include <linux/platform_data/dma-mv_xor.h>
  22
  23#include "dmaengine.h"
  24#include "mv_xor.h"
  25
  26enum mv_xor_type {
  27	XOR_ORION,
  28	XOR_ARMADA_38X,
  29	XOR_ARMADA_37XX,
  30};
  31
  32enum mv_xor_mode {
  33	XOR_MODE_IN_REG,
  34	XOR_MODE_IN_DESC,
  35};
  36
  37static void mv_xor_issue_pending(struct dma_chan *chan);
  38
  39#define to_mv_xor_chan(chan)		\
  40	container_of(chan, struct mv_xor_chan, dmachan)
  41
  42#define to_mv_xor_slot(tx)		\
  43	container_of(tx, struct mv_xor_desc_slot, async_tx)
  44
  45#define mv_chan_to_devp(chan)           \
  46	((chan)->dmadev.dev)
  47
  48static void mv_desc_init(struct mv_xor_desc_slot *desc,
  49			 dma_addr_t addr, u32 byte_count,
  50			 enum dma_ctrl_flags flags)
  51{
  52	struct mv_xor_desc *hw_desc = desc->hw_desc;
  53
  54	hw_desc->status = XOR_DESC_DMA_OWNED;
  55	hw_desc->phy_next_desc = 0;
  56	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
  57	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
  58				XOR_DESC_EOD_INT_EN : 0;
  59	hw_desc->phy_dest_addr = addr;
  60	hw_desc->byte_count = byte_count;
  61}
  62
  63static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
  64{
  65	struct mv_xor_desc *hw_desc = desc->hw_desc;
  66
  67	switch (desc->type) {
  68	case DMA_XOR:
  69	case DMA_INTERRUPT:
  70		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
  71		break;
  72	case DMA_MEMCPY:
  73		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
  74		break;
  75	default:
  76		BUG();
  77		return;
  78	}
  79}
  80
  81static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
  82				  u32 next_desc_addr)
  83{
  84	struct mv_xor_desc *hw_desc = desc->hw_desc;
  85	BUG_ON(hw_desc->phy_next_desc);
  86	hw_desc->phy_next_desc = next_desc_addr;
  87}
  88
  89static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
  90				 int index, dma_addr_t addr)
  91{
  92	struct mv_xor_desc *hw_desc = desc->hw_desc;
  93	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
  94	if (desc->type == DMA_XOR)
  95		hw_desc->desc_command |= (1 << index);
  96}
  97
  98static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
  99{
 100	return readl_relaxed(XOR_CURR_DESC(chan));
 101}
 102
 103static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 104					u32 next_desc_addr)
 105{
 106	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
 107}
 108
 109static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 110{
 111	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
 112	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
 113	writel_relaxed(val, XOR_INTR_MASK(chan));
 114}
 115
 116static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
 117{
 118	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
 119	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
 120	return intr_cause;
 121}
 122
 123static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
 124{
 125	u32 val;
 126
 127	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
 128	val = ~(val << (chan->idx * 16));
 129	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
 130	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 131}
 132
 133static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
 134{
 135	u32 val = 0xFFFF0000 >> (chan->idx * 16);
 136	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 137}
 138
 139static void mv_chan_set_mode(struct mv_xor_chan *chan,
 140			     u32 op_mode)
 141{
 142	u32 config = readl_relaxed(XOR_CONFIG(chan));
 143
 144	config &= ~0x7;
 145	config |= op_mode;
 146
 147#if defined(__BIG_ENDIAN)
 148	config |= XOR_DESCRIPTOR_SWAP;
 149#else
 150	config &= ~XOR_DESCRIPTOR_SWAP;
 151#endif
 152
 153	writel_relaxed(config, XOR_CONFIG(chan));
 154}
 155
 156static void mv_chan_activate(struct mv_xor_chan *chan)
 157{
 158	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
 159
 160	/* writel ensures all descriptors are flushed before activation */
 161	writel(BIT(0), XOR_ACTIVATION(chan));
 162}
 163
 164static char mv_chan_is_busy(struct mv_xor_chan *chan)
 165{
 166	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
 167
 168	state = (state >> 4) & 0x3;
 169
 170	return (state == 1) ? 1 : 0;
 171}
 172
 173/*
 174 * mv_chan_start_new_chain - program the engine to operate on new
 175 * chain headed by sw_desc
 176 * Caller must hold &mv_chan->lock while calling this function
 177 */
 178static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
 179				    struct mv_xor_desc_slot *sw_desc)
 180{
 181	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
 182		__func__, __LINE__, sw_desc);
 183
 184	/* set the hardware chain */
 185	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
 186
 187	mv_chan->pending++;
 188	mv_xor_issue_pending(&mv_chan->dmachan);
 189}
 190
 191static dma_cookie_t
 192mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 193				struct mv_xor_chan *mv_chan,
 194				dma_cookie_t cookie)
 195{
 196	BUG_ON(desc->async_tx.cookie < 0);
 197
 198	if (desc->async_tx.cookie > 0) {
 199		cookie = desc->async_tx.cookie;
 200
 201		dma_descriptor_unmap(&desc->async_tx);
 202		/* call the callback (must not sleep or submit new
 203		 * operations to this channel)
 204		 */
 205		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
 206	}
 207
 208	/* run dependent operations */
 209	dma_run_dependencies(&desc->async_tx);
 210
 211	return cookie;
 212}
 213
 214static int
 215mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
 216{
 217	struct mv_xor_desc_slot *iter, *_iter;
 218
 219	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 220	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 221				 node) {
 222
 223		if (async_tx_test_ack(&iter->async_tx)) {
 224			list_move_tail(&iter->node, &mv_chan->free_slots);
 225			if (!list_empty(&iter->sg_tx_list)) {
 226				list_splice_tail_init(&iter->sg_tx_list,
 227							&mv_chan->free_slots);
 228			}
 229		}
 230	}
 231	return 0;
 232}
 233
 234static int
 235mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
 236		   struct mv_xor_chan *mv_chan)
 237{
 238	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
 239		__func__, __LINE__, desc, desc->async_tx.flags);
 240
 241	/* the client is allowed to attach dependent operations
 242	 * until 'ack' is set
 243	 */
 244	if (!async_tx_test_ack(&desc->async_tx)) {
 245		/* move this slot to the completed_slots */
 246		list_move_tail(&desc->node, &mv_chan->completed_slots);
 247		if (!list_empty(&desc->sg_tx_list)) {
 248			list_splice_tail_init(&desc->sg_tx_list,
 249					      &mv_chan->completed_slots);
 250		}
 251	} else {
 252		list_move_tail(&desc->node, &mv_chan->free_slots);
 253		if (!list_empty(&desc->sg_tx_list)) {
 254			list_splice_tail_init(&desc->sg_tx_list,
 255					      &mv_chan->free_slots);
 256		}
 257	}
 258
 259	return 0;
 260}
 261
 262/* This function must be called with the mv_xor_chan spinlock held */
 263static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
 264{
 265	struct mv_xor_desc_slot *iter, *_iter;
 266	dma_cookie_t cookie = 0;
 267	int busy = mv_chan_is_busy(mv_chan);
 268	u32 current_desc = mv_chan_get_current_desc(mv_chan);
 269	int current_cleaned = 0;
 270	struct mv_xor_desc *hw_desc;
 271
 272	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 273	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
 274	mv_chan_clean_completed_slots(mv_chan);
 275
 276	/* free completed slots from the chain starting with
 277	 * the oldest descriptor
 278	 */
 279
 280	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 281				 node) {
 282
 283		/* clean finished descriptors */
 284		hw_desc = iter->hw_desc;
 285		if (hw_desc->status & XOR_DESC_SUCCESS) {
 286			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
 287								 cookie);
 288
 289			/* done processing desc, clean slot */
 290			mv_desc_clean_slot(iter, mv_chan);
 291
 292			/* break if we did cleaned the current */
 293			if (iter->async_tx.phys == current_desc) {
 294				current_cleaned = 1;
 295				break;
 296			}
 297		} else {
 298			if (iter->async_tx.phys == current_desc) {
 299				current_cleaned = 0;
 300				break;
 301			}
 302		}
 303	}
 304
 305	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
 306		if (current_cleaned) {
 307			/*
 308			 * current descriptor cleaned and removed, run
 309			 * from list head
 310			 */
 311			iter = list_entry(mv_chan->chain.next,
 312					  struct mv_xor_desc_slot,
 313					  node);
 314			mv_chan_start_new_chain(mv_chan, iter);
 315		} else {
 316			if (!list_is_last(&iter->node, &mv_chan->chain)) {
 317				/*
 318				 * descriptors are still waiting after
 319				 * current, trigger them
 320				 */
 321				iter = list_entry(iter->node.next,
 322						  struct mv_xor_desc_slot,
 323						  node);
 324				mv_chan_start_new_chain(mv_chan, iter);
 325			} else {
 326				/*
 327				 * some descriptors are still waiting
 328				 * to be cleaned
 329				 */
 330				tasklet_schedule(&mv_chan->irq_tasklet);
 331			}
 332		}
 333	}
 334
 335	if (cookie > 0)
 336		mv_chan->dmachan.completed_cookie = cookie;
 337}
 338
 339static void mv_xor_tasklet(unsigned long data)
 340{
 341	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
 342
 343	spin_lock(&chan->lock);
 344	mv_chan_slot_cleanup(chan);
 345	spin_unlock(&chan->lock);
 346}
 347
 348static struct mv_xor_desc_slot *
 349mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
 350{
 351	struct mv_xor_desc_slot *iter;
 352
 353	spin_lock_bh(&mv_chan->lock);
 354
 355	if (!list_empty(&mv_chan->free_slots)) {
 356		iter = list_first_entry(&mv_chan->free_slots,
 357					struct mv_xor_desc_slot,
 358					node);
 359
 360		list_move_tail(&iter->node, &mv_chan->allocated_slots);
 361
 362		spin_unlock_bh(&mv_chan->lock);
 363
 364		/* pre-ack descriptor */
 365		async_tx_ack(&iter->async_tx);
 366		iter->async_tx.cookie = -EBUSY;
 367
 368		return iter;
 369
 370	}
 371
 372	spin_unlock_bh(&mv_chan->lock);
 373
 374	/* try to free some slots if the allocation fails */
 375	tasklet_schedule(&mv_chan->irq_tasklet);
 376
 377	return NULL;
 378}
 379
 380/************************ DMA engine API functions ****************************/
 381static dma_cookie_t
 382mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 383{
 384	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
 385	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
 386	struct mv_xor_desc_slot *old_chain_tail;
 387	dma_cookie_t cookie;
 388	int new_hw_chain = 1;
 389
 390	dev_dbg(mv_chan_to_devp(mv_chan),
 391		"%s sw_desc %p: async_tx %p\n",
 392		__func__, sw_desc, &sw_desc->async_tx);
 393
 394	spin_lock_bh(&mv_chan->lock);
 395	cookie = dma_cookie_assign(tx);
 396
 397	if (list_empty(&mv_chan->chain))
 398		list_move_tail(&sw_desc->node, &mv_chan->chain);
 399	else {
 400		new_hw_chain = 0;
 401
 402		old_chain_tail = list_entry(mv_chan->chain.prev,
 403					    struct mv_xor_desc_slot,
 404					    node);
 405		list_move_tail(&sw_desc->node, &mv_chan->chain);
 406
 407		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
 408			&old_chain_tail->async_tx.phys);
 409
 410		/* fix up the hardware chain */
 411		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
 412
 413		/* if the channel is not busy */
 414		if (!mv_chan_is_busy(mv_chan)) {
 415			u32 current_desc = mv_chan_get_current_desc(mv_chan);
 416			/*
 417			 * and the curren desc is the end of the chain before
 418			 * the append, then we need to start the channel
 419			 */
 420			if (current_desc == old_chain_tail->async_tx.phys)
 421				new_hw_chain = 1;
 422		}
 423	}
 424
 425	if (new_hw_chain)
 426		mv_chan_start_new_chain(mv_chan, sw_desc);
 427
 428	spin_unlock_bh(&mv_chan->lock);
 429
 430	return cookie;
 431}
 432
 433/* returns the number of allocated descriptors */
 434static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 435{
 436	void *virt_desc;
 437	dma_addr_t dma_desc;
 438	int idx;
 439	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 440	struct mv_xor_desc_slot *slot = NULL;
 441	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
 442
 443	/* Allocate descriptor slots */
 444	idx = mv_chan->slots_allocated;
 445	while (idx < num_descs_in_pool) {
 446		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 447		if (!slot) {
 448			dev_info(mv_chan_to_devp(mv_chan),
 449				 "channel only initialized %d descriptor slots",
 450				 idx);
 451			break;
 452		}
 453		virt_desc = mv_chan->dma_desc_pool_virt;
 454		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
 455
 456		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 457		slot->async_tx.tx_submit = mv_xor_tx_submit;
 458		INIT_LIST_HEAD(&slot->node);
 459		INIT_LIST_HEAD(&slot->sg_tx_list);
 460		dma_desc = mv_chan->dma_desc_pool;
 461		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
 462		slot->idx = idx++;
 463
 464		spin_lock_bh(&mv_chan->lock);
 465		mv_chan->slots_allocated = idx;
 466		list_add_tail(&slot->node, &mv_chan->free_slots);
 467		spin_unlock_bh(&mv_chan->lock);
 468	}
 469
 470	dev_dbg(mv_chan_to_devp(mv_chan),
 471		"allocated %d descriptor slots\n",
 472		mv_chan->slots_allocated);
 473
 474	return mv_chan->slots_allocated ? : -ENOMEM;
 475}
 476
 477/*
  478 * Check if the source or destination is a PCIe/IO address (non-SDRAM) and
  479 * add a new MBus window if necessary. Use a cache for these checks so that
  480 * the MMIO-mapped registers don't have to be accessed every time, which
  481 * speeds up the check. A worked example follows the function body below.
 482 */
 483static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
 484{
 485	struct mv_xor_device *xordev = mv_chan->xordev;
 486	void __iomem *base = mv_chan->mmr_high_base;
 487	u32 win_enable;
 488	u32 size;
 489	u8 target, attr;
 490	int ret;
 491	int i;
 492
 493	/* Nothing needs to get done for the Armada 3700 */
 494	if (xordev->xor_type == XOR_ARMADA_37XX)
 495		return 0;
 496
 497	/*
  498	 * Loop over the cached windows to check whether the requested area
  499	 * is already mapped. If this is the case, nothing needs to be done
 500	 * and we can return.
 501	 */
 502	for (i = 0; i < WINDOW_COUNT; i++) {
 503		if (addr >= xordev->win_start[i] &&
 504		    addr <= xordev->win_end[i]) {
 505			/* Window is already mapped */
 506			return 0;
 507		}
 508	}
 509
 510	/*
  511	 * The window is not mapped, so we need to create a new mapping
 512	 */
 513
  514	/* If no IO window is found, the addr has to be located in SDRAM */
 515	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
 516	if (ret < 0)
 517		return 0;
 518
 519	/*
 520	 * Mask the base addr 'addr' according to 'size' read back from the
 521	 * MBus window. Otherwise we might end up with an address located
  522	 * somewhere in the middle of this area.
 523	 */
 524	size -= 1;
 525	addr &= ~size;
 526
 527	/*
  528	 * Reading either of the two enable registers is enough, as they are
  529	 * always programmed to identical values.
 530	 */
 531	win_enable = readl(base + WINDOW_BAR_ENABLE(0));
 532
 533	/* Set 'i' to the first free window to write the new values to */
 534	i = ffs(~win_enable) - 1;
 535	if (i >= WINDOW_COUNT)
 536		return -ENOMEM;
 537
 538	writel((addr & 0xffff0000) | (attr << 8) | target,
 539	       base + WINDOW_BASE(i));
 540	writel(size & 0xffff0000, base + WINDOW_SIZE(i));
 541
 542	/* Fill the caching variables for later use */
 543	xordev->win_start[i] = addr;
 544	xordev->win_end[i] = addr + size;
 545
 546	win_enable |= (1 << i);
 547	win_enable |= 3 << (16 + (2 * i));
 548	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
 549	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
 550
 551	return 0;
 552}
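
/*
 * Worked example for the window setup above (hypothetical numbers, for
 * illustration only): assume mvebu_mbus_get_io_win_info() reports a 1 MiB
 * window (size = 0x00100000) for addr = 0xf1234567. After "size -= 1" the
 * mask is 0x000fffff, so "addr &= ~size" aligns the base down to
 * 0xf1200000. The registers are then programmed as:
 *
 *	WINDOW_BASE(i) = (0xf1200000 & 0xffff0000) | (attr << 8) | target
 *	WINDOW_SIZE(i) = 0x000fffff & 0xffff0000 = 0x000f0000
 *
 * i.e. only the upper 16 bits of the base and size are programmed, which
 * is why 'addr' has to be aligned first.
 */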
 553
 554static struct dma_async_tx_descriptor *
 555mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 556		    unsigned int src_cnt, size_t len, unsigned long flags)
 557{
 558	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 559	struct mv_xor_desc_slot *sw_desc;
 560	int ret;
 561
 562	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 563		return NULL;
 564
 565	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 566
 567	dev_dbg(mv_chan_to_devp(mv_chan),
 568		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
 569		__func__, src_cnt, len, &dest, flags);
 570
 571	/* Check if a new window needs to get added for 'dest' */
 572	ret = mv_xor_add_io_win(mv_chan, dest);
 573	if (ret)
 574		return NULL;
 575
 576	sw_desc = mv_chan_alloc_slot(mv_chan);
 577	if (sw_desc) {
 578		sw_desc->type = DMA_XOR;
 579		sw_desc->async_tx.flags = flags;
 580		mv_desc_init(sw_desc, dest, len, flags);
 581		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
 582			mv_desc_set_mode(sw_desc);
 583		while (src_cnt--) {
 584			/* Check if a new window needs to get added for 'src' */
 585			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
 586			if (ret)
 587				return NULL;
 588			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
 589		}
 590	}
 591
 592	dev_dbg(mv_chan_to_devp(mv_chan),
 593		"%s sw_desc %p async_tx %p \n",
 594		__func__, sw_desc, &sw_desc->async_tx);
 595	return sw_desc ? &sw_desc->async_tx : NULL;
 596}
 597
 598static struct dma_async_tx_descriptor *
 599mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 600		size_t len, unsigned long flags)
 601{
 602	/*
 603	 * A MEMCPY operation is identical to an XOR operation with only
 604	 * a single source address.
 605	 */
 606	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 607}
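
/*
 * Minimal client-side sketch (not part of this driver) showing how a
 * dmaengine consumer would reach the prep routine above through the
 * generic API; 'dst', 'src' and 'len' are placeholder DMA addresses and
 * length, and error handling is elided:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		dev_err(chan->device->dev, "copy did not complete\n");
 *	dma_release_channel(chan);
 */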
 608
 609static struct dma_async_tx_descriptor *
 610mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 611{
 612	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 613	dma_addr_t src, dest;
 614	size_t len;
 615
 616	src = mv_chan->dummy_src_addr;
 617	dest = mv_chan->dummy_dst_addr;
 618	len = MV_XOR_MIN_BYTE_COUNT;
 619
 620	/*
 621	 * We implement the DMA_INTERRUPT operation as a minimum sized
 622	 * XOR operation with a single dummy source address.
 623	 */
 624	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 625}
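
/*
 * Sketch of how a client would typically use such an interrupt descriptor
 * to get a callback once everything queued before it has completed
 * (callback and context names are placeholders):
 *
 *	tx = chan->device->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
 *	tx->callback = my_done_callback;
 *	tx->callback_param = my_ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */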
 626
 627static void mv_xor_free_chan_resources(struct dma_chan *chan)
 628{
 629	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 630	struct mv_xor_desc_slot *iter, *_iter;
 631	int in_use_descs = 0;
 632
 633	spin_lock_bh(&mv_chan->lock);
 634
 635	mv_chan_slot_cleanup(mv_chan);
 636
 637	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 638					node) {
 639		in_use_descs++;
 640		list_move_tail(&iter->node, &mv_chan->free_slots);
 641	}
 642	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 643				 node) {
 644		in_use_descs++;
 645		list_move_tail(&iter->node, &mv_chan->free_slots);
 646	}
 647	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
 648				 node) {
 649		in_use_descs++;
 650		list_move_tail(&iter->node, &mv_chan->free_slots);
 651	}
 652	list_for_each_entry_safe_reverse(
 653		iter, _iter, &mv_chan->free_slots, node) {
 654		list_del(&iter->node);
 655		kfree(iter);
 656		mv_chan->slots_allocated--;
 657	}
 658
 659	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
 660		__func__, mv_chan->slots_allocated);
 661	spin_unlock_bh(&mv_chan->lock);
 662
 663	if (in_use_descs)
 664		dev_err(mv_chan_to_devp(mv_chan),
 665			"freeing %d in use descriptors!\n", in_use_descs);
 666}
 667
 668/**
 669 * mv_xor_status - poll the status of an XOR transaction
 670 * @chan: XOR channel handle
 671 * @cookie: XOR transaction identifier
 672 * @txstate: XOR transactions state holder (or NULL)
 673 */
 674static enum dma_status mv_xor_status(struct dma_chan *chan,
 675					  dma_cookie_t cookie,
 676					  struct dma_tx_state *txstate)
 677{
 678	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 679	enum dma_status ret;
 680
 681	ret = dma_cookie_status(chan, cookie, txstate);
 682	if (ret == DMA_COMPLETE)
 683		return ret;
 684
 685	spin_lock_bh(&mv_chan->lock);
 686	mv_chan_slot_cleanup(mv_chan);
 687	spin_unlock_bh(&mv_chan->lock);
 688
 689	return dma_cookie_status(chan, cookie, txstate);
 690}
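
/*
 * A polling caller would typically use the generic wrapper, which ends up
 * in mv_xor_status() for this driver (illustrative only):
 *
 *	if (dmaengine_tx_status(chan, cookie, NULL) == DMA_COMPLETE)
 *		... the transaction and all earlier ones have completed ...
 */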
 691
 692static void mv_chan_dump_regs(struct mv_xor_chan *chan)
 693{
 694	u32 val;
 695
 696	val = readl_relaxed(XOR_CONFIG(chan));
 697	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
 698
 699	val = readl_relaxed(XOR_ACTIVATION(chan));
 700	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
 701
 702	val = readl_relaxed(XOR_INTR_CAUSE(chan));
 703	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
 704
 705	val = readl_relaxed(XOR_INTR_MASK(chan));
 706	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
 707
 708	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
 709	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
 710
 711	val = readl_relaxed(XOR_ERROR_ADDR(chan));
 712	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
 713}
 714
 715static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
 716					  u32 intr_cause)
 717{
 718	if (intr_cause & XOR_INT_ERR_DECODE) {
 719		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
 720		return;
 721	}
 722
 723	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
 724		chan->idx, intr_cause);
 725
 726	mv_chan_dump_regs(chan);
 727	WARN_ON(1);
 728}
 729
 730static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
 731{
 732	struct mv_xor_chan *chan = data;
 733	u32 intr_cause = mv_chan_get_intr_cause(chan);
 734
 735	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
 736
 737	if (intr_cause & XOR_INTR_ERRORS)
 738		mv_chan_err_interrupt_handler(chan, intr_cause);
 739
 740	tasklet_schedule(&chan->irq_tasklet);
 741
 742	mv_chan_clear_eoc_cause(chan);
 743
 744	return IRQ_HANDLED;
 745}
 746
 747static void mv_xor_issue_pending(struct dma_chan *chan)
 748{
 749	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 750
 751	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
 752		mv_chan->pending = 0;
 753		mv_chan_activate(mv_chan);
 754	}
 755}
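
/*
 * Note that the engine is only (re)activated here once MV_XOR_THRESHOLD
 * descriptors are pending, so submitting alone is not enough; clients must
 * follow the usual dmaengine pattern (sketch):
 *
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */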
 756
 757/*
 758 * Perform a transaction to verify the HW works.
 759 */
 760
 761static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
 762{
 763	int i, ret;
 764	void *src, *dest;
 765	dma_addr_t src_dma, dest_dma;
 766	struct dma_chan *dma_chan;
 767	dma_cookie_t cookie;
 768	struct dma_async_tx_descriptor *tx;
 769	struct dmaengine_unmap_data *unmap;
 770	int err = 0;
 771
 772	src = kmalloc(PAGE_SIZE, GFP_KERNEL);
 773	if (!src)
 774		return -ENOMEM;
 775
 776	dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
 777	if (!dest) {
 778		kfree(src);
 779		return -ENOMEM;
 780	}
 781
 782	/* Fill in src buffer */
 783	for (i = 0; i < PAGE_SIZE; i++)
 784		((u8 *) src)[i] = (u8)i;
 785
 786	dma_chan = &mv_chan->dmachan;
 787	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 788		err = -ENODEV;
 789		goto out;
 790	}
 791
 792	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
 793	if (!unmap) {
 794		err = -ENOMEM;
 795		goto free_resources;
 796	}
 797
 798	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
 799			       offset_in_page(src), PAGE_SIZE,
 800			       DMA_TO_DEVICE);
 801	unmap->addr[0] = src_dma;
 802
 803	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
 804	if (ret) {
 805		err = -ENOMEM;
 806		goto free_resources;
 807	}
 808	unmap->to_cnt = 1;
 809
 810	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
 811				offset_in_page(dest), PAGE_SIZE,
 812				DMA_FROM_DEVICE);
 813	unmap->addr[1] = dest_dma;
 814
 815	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
 816	if (ret) {
 817		err = -ENOMEM;
 818		goto free_resources;
 819	}
 820	unmap->from_cnt = 1;
 821	unmap->len = PAGE_SIZE;
 822
 823	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
 824				    PAGE_SIZE, 0);
 825	if (!tx) {
 826		dev_err(dma_chan->device->dev,
 827			"Self-test cannot prepare operation, disabling\n");
 828		err = -ENODEV;
 829		goto free_resources;
 830	}
 831
 832	cookie = mv_xor_tx_submit(tx);
 833	if (dma_submit_error(cookie)) {
 834		dev_err(dma_chan->device->dev,
 835			"Self-test submit error, disabling\n");
 836		err = -ENODEV;
 837		goto free_resources;
 838	}
 839
 840	mv_xor_issue_pending(dma_chan);
 841	async_tx_ack(tx);
 842	msleep(1);
 843
 844	if (mv_xor_status(dma_chan, cookie, NULL) !=
 845	    DMA_COMPLETE) {
 846		dev_err(dma_chan->device->dev,
 847			"Self-test copy timed out, disabling\n");
 848		err = -ENODEV;
 849		goto free_resources;
 850	}
 851
 852	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 853				PAGE_SIZE, DMA_FROM_DEVICE);
 854	if (memcmp(src, dest, PAGE_SIZE)) {
 855		dev_err(dma_chan->device->dev,
 856			"Self-test copy failed compare, disabling\n");
 857		err = -ENODEV;
 858		goto free_resources;
 859	}
 860
 861free_resources:
 862	dmaengine_unmap_put(unmap);
 863	mv_xor_free_chan_resources(dma_chan);
 864out:
 865	kfree(src);
 866	kfree(dest);
 867	return err;
 868}
 869
 870#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
 871static int
 872mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
 873{
 874	int i, src_idx, ret;
 875	struct page *dest;
 876	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
 877	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 878	dma_addr_t dest_dma;
 879	struct dma_async_tx_descriptor *tx;
 880	struct dmaengine_unmap_data *unmap;
 881	struct dma_chan *dma_chan;
 882	dma_cookie_t cookie;
 883	u8 cmp_byte = 0;
 884	u32 cmp_word;
 885	int err = 0;
 886	int src_count = MV_XOR_NUM_SRC_TEST;
 887
 888	for (src_idx = 0; src_idx < src_count; src_idx++) {
 889		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 890		if (!xor_srcs[src_idx]) {
 891			while (src_idx--)
 892				__free_page(xor_srcs[src_idx]);
 893			return -ENOMEM;
 894		}
 895	}
 896
 897	dest = alloc_page(GFP_KERNEL);
 898	if (!dest) {
 899		while (src_idx--)
 900			__free_page(xor_srcs[src_idx]);
 901		return -ENOMEM;
 902	}
 903
 904	/* Fill in src buffers */
 905	for (src_idx = 0; src_idx < src_count; src_idx++) {
 906		u8 *ptr = page_address(xor_srcs[src_idx]);
 907		for (i = 0; i < PAGE_SIZE; i++)
 908			ptr[i] = (1 << src_idx);
 909	}
 910
 911	for (src_idx = 0; src_idx < src_count; src_idx++)
 912		cmp_byte ^= (u8) (1 << src_idx);
 913
 914	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
 915		(cmp_byte << 8) | cmp_byte;
 916
 917	memset(page_address(dest), 0, PAGE_SIZE);
 918
 919	dma_chan = &mv_chan->dmachan;
 920	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 921		err = -ENODEV;
 922		goto out;
 923	}
 924
 925	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
 926					 GFP_KERNEL);
 927	if (!unmap) {
 928		err = -ENOMEM;
 929		goto free_resources;
 930	}
 931
 932	/* test xor */
 933	for (i = 0; i < src_count; i++) {
 934		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
 935					      0, PAGE_SIZE, DMA_TO_DEVICE);
 936		dma_srcs[i] = unmap->addr[i];
 937		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
 938		if (ret) {
 939			err = -ENOMEM;
 940			goto free_resources;
 941		}
 942		unmap->to_cnt++;
 943	}
 944
 945	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
 946				      DMA_FROM_DEVICE);
 947	dest_dma = unmap->addr[src_count];
 948	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
 949	if (ret) {
 950		err = -ENOMEM;
 951		goto free_resources;
 952	}
 953	unmap->from_cnt = 1;
 954	unmap->len = PAGE_SIZE;
 955
 956	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 957				 src_count, PAGE_SIZE, 0);
 958	if (!tx) {
 959		dev_err(dma_chan->device->dev,
 960			"Self-test cannot prepare operation, disabling\n");
 961		err = -ENODEV;
 962		goto free_resources;
 963	}
 964
 965	cookie = mv_xor_tx_submit(tx);
 966	if (dma_submit_error(cookie)) {
 967		dev_err(dma_chan->device->dev,
 968			"Self-test submit error, disabling\n");
 969		err = -ENODEV;
 970		goto free_resources;
 971	}
 972
 973	mv_xor_issue_pending(dma_chan);
 974	async_tx_ack(tx);
 975	msleep(8);
 976
 977	if (mv_xor_status(dma_chan, cookie, NULL) !=
 978	    DMA_COMPLETE) {
 979		dev_err(dma_chan->device->dev,
 980			"Self-test xor timed out, disabling\n");
 981		err = -ENODEV;
 982		goto free_resources;
 983	}
 984
 985	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 986				PAGE_SIZE, DMA_FROM_DEVICE);
 987	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 988		u32 *ptr = page_address(dest);
 989		if (ptr[i] != cmp_word) {
 990			dev_err(dma_chan->device->dev,
 991				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
 992				i, ptr[i], cmp_word);
 993			err = -ENODEV;
 994			goto free_resources;
 995		}
 996	}
 997
 998free_resources:
 999	dmaengine_unmap_put(unmap);
1000	mv_xor_free_chan_resources(dma_chan);
1001out:
1002	src_idx = src_count;
1003	while (src_idx--)
1004		__free_page(xor_srcs[src_idx]);
1005	__free_page(dest);
1006	return err;
1007}
1008
1009static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1010{
1011	struct dma_chan *chan, *_chan;
1012	struct device *dev = mv_chan->dmadev.dev;
1013
1014	dma_async_device_unregister(&mv_chan->dmadev);
1015
1016	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
1017			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1018	dma_unmap_single(dev, mv_chan->dummy_src_addr,
1019			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
1020	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
1021			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
1022
1023	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1024				 device_node) {
1025		list_del(&chan->device_node);
1026	}
1027
1028	free_irq(mv_chan->irq, mv_chan);
1029
1030	return 0;
1031}
1032
1033static struct mv_xor_chan *
1034mv_xor_channel_add(struct mv_xor_device *xordev,
1035		   struct platform_device *pdev,
1036		   int idx, dma_cap_mask_t cap_mask, int irq)
1037{
1038	int ret = 0;
1039	struct mv_xor_chan *mv_chan;
1040	struct dma_device *dma_dev;
1041
1042	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1043	if (!mv_chan)
1044		return ERR_PTR(-ENOMEM);
1045
1046	mv_chan->idx = idx;
1047	mv_chan->irq = irq;
1048	if (xordev->xor_type == XOR_ORION)
1049		mv_chan->op_in_desc = XOR_MODE_IN_REG;
1050	else
1051		mv_chan->op_in_desc = XOR_MODE_IN_DESC;
1052
1053	dma_dev = &mv_chan->dmadev;
1054	dma_dev->dev = &pdev->dev;
1055	mv_chan->xordev = xordev;
1056
1057	/*
1058	 * These source and destination dummy buffers are used to implement
1059	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
1060	 * Hence, we only need to map the buffers at initialization-time.
1061	 */
1062	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
1063		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
1064	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
1065		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
1066
1067	/* allocate coherent memory for hardware descriptors
1068	 * note: writecombine gives slightly better performance, but
1069	 * requires that we explicitly flush the writes
1070	 */
1071	mv_chan->dma_desc_pool_virt =
1072	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
1073		       GFP_KERNEL);
1074	if (!mv_chan->dma_desc_pool_virt)
1075		return ERR_PTR(-ENOMEM);
1076
 1077	/* discover transaction capabilities from the platform data */
1078	dma_dev->cap_mask = cap_mask;
1079
1080	INIT_LIST_HEAD(&dma_dev->channels);
1081
1082	/* set base routines */
1083	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1084	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1085	dma_dev->device_tx_status = mv_xor_status;
1086	dma_dev->device_issue_pending = mv_xor_issue_pending;
1087
1088	/* set prep routines based on capability */
1089	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1090		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1091	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1092		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1093	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1094		dma_dev->max_xor = 8;
1095		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1096	}
1097
1098	mv_chan->mmr_base = xordev->xor_base;
1099	mv_chan->mmr_high_base = xordev->xor_high_base;
1100	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1101		     mv_chan);
1102
1103	/* clear errors before enabling interrupts */
1104	mv_chan_clear_err_status(mv_chan);
1105
1106	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1107			  0, dev_name(&pdev->dev), mv_chan);
1108	if (ret)
1109		goto err_free_dma;
1110
1111	mv_chan_unmask_interrupts(mv_chan);
1112
1113	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
1114		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
1115	else
1116		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
1117
1118	spin_lock_init(&mv_chan->lock);
1119	INIT_LIST_HEAD(&mv_chan->chain);
1120	INIT_LIST_HEAD(&mv_chan->completed_slots);
1121	INIT_LIST_HEAD(&mv_chan->free_slots);
1122	INIT_LIST_HEAD(&mv_chan->allocated_slots);
1123	mv_chan->dmachan.device = dma_dev;
1124	dma_cookie_init(&mv_chan->dmachan);
1125
1126	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1127
1128	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1129		ret = mv_chan_memcpy_self_test(mv_chan);
1130		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1131		if (ret)
1132			goto err_free_irq;
1133	}
1134
1135	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1136		ret = mv_chan_xor_self_test(mv_chan);
1137		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1138		if (ret)
1139			goto err_free_irq;
1140	}
1141
1142	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
1143		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1144		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1145		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1146		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1147
1148	ret = dma_async_device_register(dma_dev);
1149	if (ret)
1150		goto err_free_irq;
1151
1152	return mv_chan;
1153
1154err_free_irq:
1155	free_irq(mv_chan->irq, mv_chan);
1156err_free_dma:
1157	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1158			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1159	return ERR_PTR(ret);
1160}
1161
1162static void
1163mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1164			 const struct mbus_dram_target_info *dram)
1165{
1166	void __iomem *base = xordev->xor_high_base;
1167	u32 win_enable = 0;
1168	int i;
1169
1170	for (i = 0; i < 8; i++) {
1171		writel(0, base + WINDOW_BASE(i));
1172		writel(0, base + WINDOW_SIZE(i));
1173		if (i < 4)
1174			writel(0, base + WINDOW_REMAP_HIGH(i));
1175	}
1176
1177	for (i = 0; i < dram->num_cs; i++) {
1178		const struct mbus_dram_window *cs = dram->cs + i;
1179
1180		writel((cs->base & 0xffff0000) |
1181		       (cs->mbus_attr << 8) |
1182		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1183		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1184
1185		/* Fill the caching variables for later use */
1186		xordev->win_start[i] = cs->base;
1187		xordev->win_end[i] = cs->base + cs->size - 1;
1188
1189		win_enable |= (1 << i);
1190		win_enable |= 3 << (16 + (2 * i));
1191	}
1192
1193	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1194	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1195	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1196	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1197}
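
/*
 * Example encoding (hypothetical DRAM layout): a chip-select at base
 * 0x00000000 with size 0x40000000 (1 GiB), mbus_attr 0x0e and target 0x0
 * would be programmed as:
 *
 *	WINDOW_BASE(i) = (0x00000000 & 0xffff0000) | (0x0e << 8) | 0x0 = 0x00000e00
 *	WINDOW_SIZE(i) = (0x40000000 - 1) & 0xffff0000 = 0x3fff0000
 *
 * Window i is then enabled via bit i of the BAR enable registers, and the
 * two bits at position 16 + 2*i (presumably the access-protection field)
 * are set to grant full access.
 */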
1198
1199static void
1200mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
1201{
1202	void __iomem *base = xordev->xor_high_base;
1203	u32 win_enable = 0;
1204	int i;
1205
1206	for (i = 0; i < 8; i++) {
1207		writel(0, base + WINDOW_BASE(i));
1208		writel(0, base + WINDOW_SIZE(i));
1209		if (i < 4)
1210			writel(0, base + WINDOW_REMAP_HIGH(i));
1211	}
1212	/*
 1213	 * For the Armada 3700, open the default 4GB MBus window. The
 1214	 * DRAM-related configuration is done at the AXIS level.
1215	 */
1216	writel(0xffff0000, base + WINDOW_SIZE(0));
1217	win_enable |= 1;
1218	win_enable |= 3 << 16;
1219
1220	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1221	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1222	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1223	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1224}
1225
1226/*
1227 * Since this XOR driver is basically used only for RAID5, we don't
1228 * need to care about synchronizing ->suspend with DMA activity,
1229 * because the DMA engine will naturally be quiet due to the block
1230 * devices being suspended.
1231 */
1232static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
1233{
1234	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1235	int i;
1236
1237	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1238		struct mv_xor_chan *mv_chan = xordev->channels[i];
1239
1240		if (!mv_chan)
1241			continue;
1242
1243		mv_chan->saved_config_reg =
1244			readl_relaxed(XOR_CONFIG(mv_chan));
1245		mv_chan->saved_int_mask_reg =
1246			readl_relaxed(XOR_INTR_MASK(mv_chan));
1247	}
1248
1249	return 0;
1250}
1251
1252static int mv_xor_resume(struct platform_device *dev)
1253{
1254	struct mv_xor_device *xordev = platform_get_drvdata(dev);
1255	const struct mbus_dram_target_info *dram;
1256	int i;
1257
1258	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1259		struct mv_xor_chan *mv_chan = xordev->channels[i];
1260
1261		if (!mv_chan)
1262			continue;
1263
1264		writel_relaxed(mv_chan->saved_config_reg,
1265			       XOR_CONFIG(mv_chan));
1266		writel_relaxed(mv_chan->saved_int_mask_reg,
1267			       XOR_INTR_MASK(mv_chan));
1268	}
1269
1270	if (xordev->xor_type == XOR_ARMADA_37XX) {
1271		mv_xor_conf_mbus_windows_a3700(xordev);
1272		return 0;
1273	}
1274
1275	dram = mv_mbus_dram_info();
1276	if (dram)
1277		mv_xor_conf_mbus_windows(xordev, dram);
1278
1279	return 0;
1280}
1281
1282static const struct of_device_id mv_xor_dt_ids[] = {
1283	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
1284	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
1285	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
1286	{},
1287};
1288
1289static unsigned int mv_xor_engine_count;
1290
1291static int mv_xor_probe(struct platform_device *pdev)
1292{
1293	const struct mbus_dram_target_info *dram;
1294	struct mv_xor_device *xordev;
1295	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1296	struct resource *res;
1297	unsigned int max_engines, max_channels;
1298	int i, ret;
1299
1300	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1301
1302	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1303	if (!xordev)
1304		return -ENOMEM;
1305
1306	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1307	if (!res)
1308		return -ENODEV;
1309
1310	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1311					resource_size(res));
1312	if (!xordev->xor_base)
1313		return -EBUSY;
1314
1315	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1316	if (!res)
1317		return -ENODEV;
1318
1319	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1320					     resource_size(res));
1321	if (!xordev->xor_high_base)
1322		return -EBUSY;
1323
1324	platform_set_drvdata(pdev, xordev);
1325
1326
1327	/*
1328	 * We need to know which type of XOR device we use before
 1329	 * setting up. In the non-DT case it can only be the legacy one.
1330	 */
1331	xordev->xor_type = XOR_ORION;
1332	if (pdev->dev.of_node) {
1333		const struct of_device_id *of_id =
1334			of_match_device(mv_xor_dt_ids,
1335					&pdev->dev);
1336
1337		xordev->xor_type = (uintptr_t)of_id->data;
1338	}
1339
1340	/*
1341	 * (Re-)program MBUS remapping windows if we are asked to.
1342	 */
1343	if (xordev->xor_type == XOR_ARMADA_37XX) {
1344		mv_xor_conf_mbus_windows_a3700(xordev);
1345	} else {
1346		dram = mv_mbus_dram_info();
1347		if (dram)
1348			mv_xor_conf_mbus_windows(xordev, dram);
1349	}
1350
1351	/* Not all platforms can gate the clock, so it is not
 1352	 * an error if the clock does not exist.
1353	 */
1354	xordev->clk = clk_get(&pdev->dev, NULL);
1355	if (!IS_ERR(xordev->clk))
1356		clk_prepare_enable(xordev->clk);
1357
1358	/*
1359	 * We don't want to have more than one channel per CPU in
 1360	 * order for async_tx to perform well. So we limit the number
 1361	 * of engines and channels to take this constraint into account.
 1362	 * Note that we also want to use channels from separate engines
 1363	 * when possible. On the dual-CPU Armada 3700 SoC, which has a
 1364	 * single XOR engine, both of its channels may be used (example below).
1365	 */
1366	max_engines = num_present_cpus();
1367	if (xordev->xor_type == XOR_ARMADA_37XX)
1368		max_channels =	num_present_cpus();
1369	else
1370		max_channels = min_t(unsigned int,
1371				     MV_XOR_MAX_CHANNELS,
1372				     DIV_ROUND_UP(num_present_cpus(), 2));
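
	/*
	 * For example (illustrative only): with four present CPUs this yields
	 * max_channels = DIV_ROUND_UP(4, 2) = 2 per XOR engine, while on the
	 * dual-CPU Armada 3700 both channels of its single engine are used
	 * (max_channels = 2).
	 */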
1373
1374	if (mv_xor_engine_count >= max_engines)
1375		return 0;
1376
1377	if (pdev->dev.of_node) {
1378		struct device_node *np;
1379		int i = 0;
1380
1381		for_each_child_of_node(pdev->dev.of_node, np) {
1382			struct mv_xor_chan *chan;
1383			dma_cap_mask_t cap_mask;
1384			int irq;
1385
1386			if (i >= max_channels)
1387				continue;
1388
1389			dma_cap_zero(cap_mask);
1390			dma_cap_set(DMA_MEMCPY, cap_mask);
1391			dma_cap_set(DMA_XOR, cap_mask);
1392			dma_cap_set(DMA_INTERRUPT, cap_mask);
1393
1394			irq = irq_of_parse_and_map(np, 0);
1395			if (!irq) {
1396				ret = -ENODEV;
1397				goto err_channel_add;
1398			}
1399
1400			chan = mv_xor_channel_add(xordev, pdev, i,
1401						  cap_mask, irq);
1402			if (IS_ERR(chan)) {
1403				ret = PTR_ERR(chan);
1404				irq_dispose_mapping(irq);
1405				goto err_channel_add;
1406			}
1407
1408			xordev->channels[i] = chan;
1409			i++;
1410		}
1411	} else if (pdata && pdata->channels) {
1412		for (i = 0; i < max_channels; i++) {
1413			struct mv_xor_channel_data *cd;
1414			struct mv_xor_chan *chan;
1415			int irq;
1416
1417			cd = &pdata->channels[i];
1418			irq = platform_get_irq(pdev, i);
1419			if (irq < 0) {
1420				ret = irq;
1421				goto err_channel_add;
1422			}
1423
1424			chan = mv_xor_channel_add(xordev, pdev, i,
1425						  cd->cap_mask, irq);
1426			if (IS_ERR(chan)) {
1427				ret = PTR_ERR(chan);
1428				goto err_channel_add;
1429			}
1430
1431			xordev->channels[i] = chan;
1432		}
1433	}
1434
1435	return 0;
1436
1437err_channel_add:
1438	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1439		if (xordev->channels[i]) {
1440			mv_xor_channel_remove(xordev->channels[i]);
1441			if (pdev->dev.of_node)
1442				irq_dispose_mapping(xordev->channels[i]->irq);
1443		}
1444
1445	if (!IS_ERR(xordev->clk)) {
1446		clk_disable_unprepare(xordev->clk);
1447		clk_put(xordev->clk);
1448	}
1449
1450	return ret;
1451}
1452
1453static struct platform_driver mv_xor_driver = {
1454	.probe		= mv_xor_probe,
1455	.suspend        = mv_xor_suspend,
1456	.resume         = mv_xor_resume,
1457	.driver		= {
1458		.name	        = MV_XOR_NAME,
1459		.of_match_table = of_match_ptr(mv_xor_dt_ids),
1460	},
1461};
1462
1463builtin_platform_driver(mv_xor_driver);
1464
1465/*
1466MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1467MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1468MODULE_LICENSE("GPL");
1469*/