/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_	/ atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define ATC_MAX_DSCR_TRIALS	10

/*
 * Initial number of descriptors to allocate for each channel. This can
 * be increased during DMA use.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);


/*----------------------------------------------------------------------*/

static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
						size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}
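
/*
 * Worked example (illustrative): for src = 0x20001000, dst = 0x20002002
 * and len = 8, the word test fails because dst is only halfword aligned
 * ((src | dst | len) & 3 == 2), but the halfword test passes, so the
 * returned width is 1, i.e. 16-bit transfers.
 */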

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 *       to make initial allocation more convenient. This bit will be cleared
 *       and control will be given to client at usage time (during
 *       preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

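/*
 * Illustrative sketch (not part of the driver): the prep_* functions
 * below build a transfer by calling atc_desc_chain() once per hardware
 * descriptor, roughly:
 *
 *	struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *	while (more segments) {
 *		desc = atc_desc_get(atchan);
 *		... fill desc->lli ...
 *		atc_desc_chain(&first, &prev, desc);
 *	}
 *	set_desc_eol(prev);
 *
 * Afterwards, "first" heads the software chain (its tx_list holds the
 * children) while the lli.dscr fields form the hardware linked list.
 */
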
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
		       ATC_SPIP_BOUNDARY(first->boundary));
	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
		       ATC_DPIP_BOUNDARY(first->boundary));
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/*
 * atc_get_desc_by_cookie - get the descriptor of a cookie
 * @atchan: the DMA channel
 * @cookie: the cookie to get the descriptor for
 */
static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
						dma_cookie_t cookie)
{
	struct at_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	return NULL;
}

/**
 * atc_calc_bytes_left - calculates the number of bytes left according to the
 * value read from CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 */
static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
{
	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);

	/*
	 * According to the datasheet, when reading the Control A Register
	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
	 * number of transfers completed on the Source Interface.
	 * So btsize is always a number of source width transfers.
	 */
	return current_len - (btsize << src_width);
}

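/*
 * Worked example (illustrative): with current_len = 4096 and a CTRLA
 * value decoding to btsize = 256 and src_width = 2 (32-bit source
 * transfers), the controller has already read 256 << 2 = 1024 bytes,
 * so atc_calc_bytes_left() returns 4096 - 1024 = 3072.
 */
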
/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 */
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct at_dma_chan      *atchan = to_at_dma_chan(chan);
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc;
	int ret;
	u32 ctrla, dscr, trials;

	/*
	 * If the cookie doesn't match the currently running transfer then
	 * we can return the total length of the associated DMA transfer,
	 * because it is still queued.
	 */
	desc = atc_get_desc_by_cookie(atchan, cookie);
	if (desc == NULL)
		return -EINVAL;
	else if (desc != desc_first)
		return desc->total_len;

	/* cookie matches the currently running transfer */
	ret = desc_first->total_len;

	if (desc_first->lli.dscr) {
		/* hardware linked list transfer */

		/*
		 * Calculate the residue by removing the length of the child
		 * descriptors already transferred from the total length.
		 * To get the current child descriptor we can use the value of
		 * the channel's DSCR register and compare it against the value
		 * of the hardware linked list structure of each child
		 * descriptor.
		 *
		 * The CTRLA register provides us with the amount of data
		 * already read from the source for the current child
		 * descriptor. So we can compute a more accurate residue by also
		 * removing the number of bytes corresponding to this amount of
		 * data.
		 *
		 * However, the DSCR and CTRLA registers cannot both be read
		 * atomically. Hence a race condition may occur: the first read
		 * register may refer to one child descriptor whereas the second
		 * read may refer to a later child descriptor in the list
		 * because of the DMA transfer progression in between the two
		 * reads.
		 *
		 * One solution could have been to pause the DMA transfer, read
		 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
		 * this approach presents some drawbacks:
		 * - If the DMA transfer is paused, RX overruns or TX underruns
		 *   are more likely to occur depending on the system latency.
		 *   Taking the USART driver as an example, it uses a cyclic DMA
		 *   transfer to read data from the Receive Holding Register
		 *   (RHR) to avoid RX overruns since the RHR is not protected
		 *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer
		 *   to compute the residue would break the USART driver design.
		 * - The atc_pause() function masks interrupts but we'd rather
		 *   avoid doing so for system latency reasons.
		 *
		 * Then we'd rather use another solution: the DSCR is read a
		 * first time, the CTRLA is read in turn, next the DSCR is read
		 * a second time. If the two consecutive read values of the DSCR
		 * are the same then we assume both refer to the very same
		 * child descriptor, as does the CTRLA value read in between.
		 * For cyclic transfers, the assumption is that a full loop
		 * is "not so fast".
		 * If the two DSCR values are different, we read the CTRLA
		 * again, then the DSCR, until two consecutive read values of
		 * the DSCR are equal or the maximum number of trials is
		 * reached. It is very unlikely that this algorithm fails to
		 * find a stable value for DSCR.
		 */

		dscr = channel_readl(atchan, DSCR);
		rmb(); /* ensure DSCR is read before CTRLA */
		ctrla = channel_readl(atchan, CTRLA);
		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
			u32 new_dscr;

			rmb(); /* ensure DSCR is read after CTRLA */
			new_dscr = channel_readl(atchan, DSCR);

			/*
			 * If the DSCR register value has not changed inside the
			 * DMA controller since the previous read, we assume
			 * that both the dscr and ctrla values refer to the
			 * very same descriptor.
			 */
			if (likely(new_dscr == dscr))
				break;

			/*
			 * DSCR has changed inside the DMA controller, so the
			 * previously read value of CTRLA may refer to an
			 * already processed descriptor hence could be outdated.
			 * We need to update ctrla to match the current
			 * descriptor.
			 */
			dscr = new_dscr;
			rmb(); /* ensure DSCR is read before CTRLA */
			ctrla = channel_readl(atchan, CTRLA);
		}
		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
			return -ETIMEDOUT;

		/* for the first descriptor we can be more accurate */
		if (desc_first->lli.dscr == dscr)
			return atc_calc_bytes_left(ret, ctrla);

		ret -= desc_first->len;
		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
			if (desc->lli.dscr == dscr)
				break;

			ret -= desc->len;
		}

		/*
		 * For the current descriptor in the chain we can calculate
		 * the remaining bytes using the channel's register.
		 */
		ret = atc_calc_bytes_left(ret, ctrla);
	} else {
		/* single transfer */
		ctrla = channel_readl(atchan, CTRLA);
		ret = atc_calc_bytes_left(ret, ctrla);
	}

	return ret;
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* If the transfer was a memset, free our temporary buffer */
	if (desc->memset_buffer) {
		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
			      desc->memset_paddr);
		desc->memset_buffer = false;
	}

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	dma_descriptor_unmap(txd);
	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: Interleaved transfer template
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_interleaved(struct dma_chan *chan,
			 struct dma_interleaved_template *xt,
			 unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct data_chunk	*first = xt->sgl;
	struct at_desc		*desc = NULL;
	size_t			xfer_count;
	unsigned int		dwidth;
	u32			ctrla;
	u32			ctrlb;
	size_t			len = 0;
	int			i;

	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
		return NULL;

	dev_info(chan2dev(chan),
		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	/*
	 * The controller can only "skip" X bytes every Y bytes, so we
	 * need to make sure we are given a template that fits that
	 * description, i.e. a template with chunks that always have the
	 * same size, with the same ICGs.
	 */
	for (i = 0; i < xt->frame_size; i++) {
		struct data_chunk *chunk = xt->sgl + i;

		if ((chunk->size != xt->sgl->size) ||
		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
			dev_err(chan2dev(chan),
				"%s: the controller can transfer only identical chunks\n",
				__func__);
			return NULL;
		}

		len += chunk->size;
	}

	dwidth = atc_get_xfer_width(xt->src_start,
				    xt->dst_start, len);

	xfer_count = len >> dwidth;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
		return NULL;
	}

	ctrla = ATC_SRC_WIDTH(dwidth) |
		ATC_DST_WIDTH(dwidth);

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_SRC_PIP
		| ATC_DST_PIP
		| ATC_FC_MEM2MEM;

	/* create the transfer */
	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan),
			"%s: couldn't allocate our descriptor\n", __func__);
		return NULL;
	}

	desc->lli.saddr = xt->src_start;
	desc->lli.daddr = xt->dst_start;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->boundary = first->size >> dwidth;
	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;

	desc->txd.cookie = -EBUSY;
	desc->total_len = desc->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &desc->txd;
}

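/*
 * Illustrative template (hypothetical values): a single frame made of
 * two 64-byte chunks, each followed by a 192-byte ICG on both source
 * and destination, passes the checks above; the controller then copies
 * 64 bytes and skips 192 bytes, twice per frame.
 */
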
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	src_width = dst_width = atc_get_xfer_width(src, dest, len);

	ctrla = ATC_SRC_WIDTH(src_width) |
		ATC_DST_WIDTH(dst_width);

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = xfer_count << src_width;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

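/*
 * Client-side sketch (illustrative, not part of this file): a memcpy
 * prepared through the generic dmaengine API lands here, roughly as:
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (txd) {
 *		cookie = dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */
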
static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
					      dma_addr_t psrc,
					      dma_addr_t pdst,
					      size_t len)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc;
	size_t xfer_count;

	u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		ATC_SRC_ADDR_MODE_FIXED |
		ATC_DST_ADDR_MODE_INCR |
		ATC_FC_MEM2MEM;

	xfer_count = len >> 2;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n",
			__func__);
		return NULL;
	}

	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
			__func__);
		return NULL;
	}

	desc->lli.saddr = psrc;
	desc->lli.daddr = pdst;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->txd.cookie = 0;
	desc->len = len;

	return desc;
}

/**
 * atc_prep_dma_memset - prepare a memset operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @value: value to set memory buffer to
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		    size_t len, unsigned long flags)
{
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	void __iomem		*vaddr;
	dma_addr_t		paddr;

	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
		&dest, value, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	desc = atc_create_memset_desc(chan, paddr, dest, len);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
			__func__);
		goto err_free_buffer;
	}

	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	desc->txd.cookie = -EBUSY;
	desc->total_len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags;

	return &desc->txd;

err_free_buffer:
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}

static struct dma_async_tx_descriptor *
atc_prep_dma_memset_sg(struct dma_chan *chan,
		       struct scatterlist *sgl,
		       unsigned int sg_len, int value,
		       unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc = NULL, *first = NULL, *prev = NULL;
	struct scatterlist	*sg;
	void __iomem		*vaddr;
	dma_addr_t		paddr;
	size_t			total_len = 0;
	int			i;

	dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
		 value, sg_len, flags);

	if (unlikely(!sgl || !sg_len)) {
		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t dest = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
			 __func__, &dest, len);

		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
				__func__);
			goto err_put_desc;
		}

		desc = atc_create_memset_desc(chan, paddr, dest, len);
		if (!desc)
			goto err_put_desc;

		atc_desc_chain(&first, &prev, desc);

		total_len += len;
	}

	/*
	 * Only set the buffer pointers on the last descriptor to
	 * avoid freeing while we have our transfer still going
	 */
	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	first->txd.flags = flags;

	return &first->txd;

err_put_desc:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}

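/*
 * Client-side sketch (illustrative, FIFO address is hypothetical): a
 * peripheral driver typically configures the channel before preparing
 * a slave transfer:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 */
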
/**
 * atc_prep_dma_sg - prepare memory to memory scatter-gather operation
 * @chan: the channel to prepare operation on
 * @dst_sg: destination scatterlist
 * @dst_nents: number of destination scatterlist entries
 * @src_sg: source scatterlist
 * @src_nents: number of source scatterlist entries
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned int		src_width;
	unsigned int		dst_width;
	size_t			xfer_count;
	u32			ctrla;
	u32			ctrlb;
	size_t			dst_len = 0, src_len = 0;
	dma_addr_t		dst = 0, src = 0;
	size_t			len = 0, total_len = 0;

	if (unlikely(dst_nents == 0 || src_nents == 0))
		return NULL;

	if (unlikely(dst_sg == NULL || src_sg == NULL))
		return NULL;

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * loop until there is either no more source or no more destination
	 * scatterlist entry
	 */
	while (true) {

		/* prepare the next transfer */
		if (dst_len == 0) {

			/* no more destination scatterlist entries */
			if (!dst_sg || !dst_nents)
				break;

			dst = sg_dma_address(dst_sg);
			dst_len = sg_dma_len(dst_sg);

			dst_sg = sg_next(dst_sg);
			dst_nents--;
		}

		if (src_len == 0) {

			/* no more source scatterlist entries */
			if (!src_sg || !src_nents)
				break;

			src = sg_dma_address(src_sg);
			src_len = sg_dma_len(src_sg);

			src_sg = sg_next(src_sg);
			src_nents--;
		}

		len = min_t(size_t, src_len, dst_len);
		if (len == 0)
			continue;

		/* take care for the alignment */
		src_width = dst_width = atc_get_xfer_width(src, dst, len);

		ctrla = ATC_SRC_WIDTH(src_width) |
			ATC_DST_WIDTH(dst_width);

		/*
		 * The number of transfers to set up refers to the source
		 * width, which depends on the alignment.
		 */
		xfer_count = len >> src_width;
		if (xfer_count > ATC_BTSIZE_MAX) {
			xfer_count = ATC_BTSIZE_MAX;
			len = ATC_BTSIZE_MAX << src_width;
		}

		/* create the transfer */
		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src;
		desc->lli.daddr = dst;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = len;

		atc_desc_chain(&first, &prev, desc);

		/* update the lengths and addresses for the next loop cycle */
		dst_len -= len;
		src_len -= len;
		dst += len;
		src += len;

		total_len += len;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		desc->len = period_len;
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		desc->len = period_len;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			&buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

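/*
 * Client-side sketch (illustrative): audio and serial drivers usually
 * set up a ring of N periods over a single buffer, e.g. four periods:
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * period_len,
 *					period_len, DMA_DEV_TO_MEM,
 *					DMA_PREP_INTERRUPT);
 *
 * With buf_len / period_len = 4, the loop above builds four descriptors
 * whose last lli.dscr points back to the first.
 */
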
static int atc_config(struct dma_chan *chan,
		      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}

static int atc_pause(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
	set_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

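/*
 * Client-side sketch (illustrative): the two handlers above back the
 * generic dmaengine helpers, so a peripheral driver simply calls
 * dmaengine_pause(chan) and dmaengine_resume(chan).
 */
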
static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	struct at_desc		*desc, *_desc;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;
	enum dma_status		ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/*  Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan, cookie);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}

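/*
 * Client-side sketch (illustrative): polling the residue of an
 * in-flight transfer goes through this handler:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		pr_debug("bytes left: %u\n", state.residue);
 */
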
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

1785#ifdef CONFIG_OF
1786static bool at_dma_filter(struct dma_chan *chan, void *slave)
1787{
1788	struct at_dma_slave *atslave = slave;
1789
1790	if (atslave->dma_dev == chan->device->dev) {
1791		chan->private = atslave;
1792		return true;
1793	} else {
1794		return false;
1795	}
1796}
1797
1798static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1799				     struct of_dma *of_dma)
1800{
1801	struct dma_chan *chan;
1802	struct at_dma_chan *atchan;
1803	struct at_dma_slave *atslave;
1804	dma_cap_mask_t mask;
1805	unsigned int per_id;
1806	struct platform_device *dmac_pdev;
1807
1808	if (dma_spec->args_count != 2)
1809		return NULL;
1810
1811	dmac_pdev = of_find_device_by_node(dma_spec->np);
1812
1813	dma_cap_zero(mask);
1814	dma_cap_set(DMA_SLAVE, mask);
1815
1816	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
1817	if (!atslave)
1818		return NULL;
1819
1820	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1821	/*
1822	 * We can fill both SRC_PER and DST_PER, one of these fields will be
1823	 * ignored depending on DMA transfer direction.
1824	 */
1825	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1826	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1827		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1828	/*
1829	 * We have to translate the value we get from the device tree since
1830	 * the half FIFO configuration value had to be 0 to keep backward
1831	 * compatibility.
1832	 */
1833	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1834	case AT91_DMA_CFG_FIFOCFG_ALAP:
1835		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1836		break;
1837	case AT91_DMA_CFG_FIFOCFG_ASAP:
1838		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1839		break;
1840	case AT91_DMA_CFG_FIFOCFG_HALF:
1841	default:
1842		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1843	}
1844	atslave->dma_dev = &dmac_pdev->dev;
1845
1846	chan = dma_request_channel(mask, at_dma_filter, atslave);
1847	if (!chan)
1848		return NULL;
1849
1850	atchan = to_at_dma_chan(chan);
1851	atchan->per_if = dma_spec->args[0] & 0xff;
1852	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1853
1854	return chan;
1855}
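/*
 * Illustrative consumer node (an assumption for demonstration, not part of
 * this file), matching the two-cell specifier decoded above: cell 0 holds
 * the memory interface in bits 31:16 and the peripheral interface in bits
 * 15:0; cell 1 holds the CFG parameters (peripheral ID, FIFO config):
 *
 *	dmas = <&dma0 2 AT91_DMA_CFG_PER_ID(7)>,
 *	       <&dma0 2 AT91_DMA_CFG_PER_ID(8)>;
 *	dma-names = "tx", "rx";
 */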
1856#else
1857static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1858				     struct of_dma *of_dma)
1859{
1860	return NULL;
1861}
1862#endif
1863
1864/*--  Module Management  -----------------------------------------------*/
1865
1866/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
1867static struct at_dma_platform_data at91sam9rl_config = {
1868	.nr_channels = 2,
1869};
1870static struct at_dma_platform_data at91sam9g45_config = {
1871	.nr_channels = 8,
1872};
1873
1874#if defined(CONFIG_OF)
1875static const struct of_device_id atmel_dma_dt_ids[] = {
1876	{
1877		.compatible = "atmel,at91sam9rl-dma",
1878		.data = &at91sam9rl_config,
1879	}, {
1880		.compatible = "atmel,at91sam9g45-dma",
1881		.data = &at91sam9g45_config,
1882	}, {
1883		/* sentinel */
1884	}
1885};
1886
1887MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1888#endif
1889
1890static const struct platform_device_id atdma_devtypes[] = {
1891	{
1892		.name = "at91sam9rl_dma",
1893		.driver_data = (unsigned long) &at91sam9rl_config,
1894	}, {
1895		.name = "at91sam9g45_dma",
1896		.driver_data = (unsigned long) &at91sam9g45_config,
1897	}, {
1898		/* sentinel */
1899	}
1900};
1901
1902static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1903						struct platform_device *pdev)
1904{
1905	if (pdev->dev.of_node) {
1906		const struct of_device_id *match;
1907		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1908		if (match == NULL)
1909			return NULL;
1910		return match->data;
1911	}
1912	return (struct at_dma_platform_data *)
1913			platform_get_device_id(pdev)->driver_data;
1914}
1915
1916/**
1917 * at_dma_off - disable DMA controller
 1918 * @atdma: the Atmel HDMAC device
1919 */
1920static void at_dma_off(struct at_dma *atdma)
1921{
1922	dma_writel(atdma, EN, 0);
1923
1924	/* disable all interrupts */
1925	dma_writel(atdma, EBCIDR, -1L);
1926
1927	/* confirm that all channels are disabled */
1928	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1929		cpu_relax();
1930}
1931
1932static int __init at_dma_probe(struct platform_device *pdev)
1933{
1934	struct resource		*io;
1935	struct at_dma		*atdma;
1936	size_t			size;
1937	int			irq;
1938	int			err;
1939	int			i;
1940	const struct at_dma_platform_data *plat_dat;
1941
1942	/* setup platform data for each SoC */
1943	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1944	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
1945	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
1946	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1947	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
1948	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
1949	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
1950	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1951	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
1952
1953	/* get DMA parameters from controller type */
1954	plat_dat = at_dma_get_driver_data(pdev);
1955	if (!plat_dat)
1956		return -ENODEV;
1957
1958	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1959	if (!io)
1960		return -EINVAL;
1961
1962	irq = platform_get_irq(pdev, 0);
1963	if (irq < 0)
1964		return irq;
1965
1966	size = sizeof(struct at_dma);
1967	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1968	atdma = kzalloc(size, GFP_KERNEL);
1969	if (!atdma)
1970		return -ENOMEM;
1971
1972	/* discover transaction capabilities */
1973	atdma->dma_common.cap_mask = plat_dat->cap_mask;
1974	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1975
1976	size = resource_size(io);
1977	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1978		err = -EBUSY;
1979		goto err_kfree;
1980	}
1981
1982	atdma->regs = ioremap(io->start, size);
1983	if (!atdma->regs) {
1984		err = -ENOMEM;
1985		goto err_release_r;
1986	}
1987
1988	atdma->clk = clk_get(&pdev->dev, "dma_clk");
1989	if (IS_ERR(atdma->clk)) {
1990		err = PTR_ERR(atdma->clk);
1991		goto err_clk;
1992	}
1993	err = clk_prepare_enable(atdma->clk);
1994	if (err)
1995		goto err_clk_prepare;
1996
1997	/* force dma off, just in case */
1998	at_dma_off(atdma);
1999
2000	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
2001	if (err)
2002		goto err_irq;
2003
2004	platform_set_drvdata(pdev, atdma);
2005
2006	/* create a pool of consistent memory blocks for hardware descriptors */
2007	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
2008			&pdev->dev, sizeof(struct at_desc),
2009			4 /* word alignment */, 0);
2010	if (!atdma->dma_desc_pool) {
2011		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
2012		err = -ENOMEM;
2013		goto err_desc_pool_create;
2014	}
2015
2016	/* create a pool of consistent memory blocks for memset blocks */
2017	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
2018					     &pdev->dev, sizeof(int), 4, 0);
2019	if (!atdma->memset_pool) {
2020		dev_err(&pdev->dev, "No memory for memset dma pool\n");
2021		err = -ENOMEM;
2022		goto err_memset_pool_create;
2023	}
2024
2025	/* clear any pending interrupt */
2026	while (dma_readl(atdma, EBCISR))
2027		cpu_relax();
2028
2029	/* initialize channels related values */
2030	INIT_LIST_HEAD(&atdma->dma_common.channels);
2031	for (i = 0; i < plat_dat->nr_channels; i++) {
2032		struct at_dma_chan	*atchan = &atdma->chan[i];
2033
2034		atchan->mem_if = AT_DMA_MEM_IF;
2035		atchan->per_if = AT_DMA_PER_IF;
2036		atchan->chan_common.device = &atdma->dma_common;
2037		dma_cookie_init(&atchan->chan_common);
2038		list_add_tail(&atchan->chan_common.device_node,
2039				&atdma->dma_common.channels);
2040
2041		atchan->ch_regs = atdma->regs + ch_regs(i);
2042		spin_lock_init(&atchan->lock);
2043		atchan->mask = 1 << i;
2044
2045		INIT_LIST_HEAD(&atchan->active_list);
2046		INIT_LIST_HEAD(&atchan->queue);
2047		INIT_LIST_HEAD(&atchan->free_list);
2048
2049		tasklet_init(&atchan->tasklet, atc_tasklet,
2050				(unsigned long)atchan);
2051		atc_enable_chan_irq(atdma, i);
2052	}
2053
2054	/* set base routines */
2055	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
2056	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
2057	atdma->dma_common.device_tx_status = atc_tx_status;
2058	atdma->dma_common.device_issue_pending = atc_issue_pending;
2059	atdma->dma_common.dev = &pdev->dev;
2060
2061	/* set prep routines based on capability */
2062	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
2063		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
2064
2065	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
2066		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
2067
2068	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
2069		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
2070		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
2071		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
2072	}
2073
2074	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
2075		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
2076		/* controller can do slave DMA: can trigger cyclic transfers */
2077		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
2078		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
2079		atdma->dma_common.device_config = atc_config;
2080		atdma->dma_common.device_pause = atc_pause;
2081		atdma->dma_common.device_resume = atc_resume;
2082		atdma->dma_common.device_terminate_all = atc_terminate_all;
2083		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
2084		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
2085		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2086		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2087	}
2088
2089	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
2090		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
2091
2092	dma_writel(atdma, EN, AT_DMA_ENABLE);
2093
2094	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
2095	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
2096	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
2097	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
2098	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
2099	  plat_dat->nr_channels);
2100
2101	dma_async_device_register(&atdma->dma_common);
2102
2103	/*
2104	 * Do not return an error if the dmac node is not present, in order not
2105	 * to break the existing way of requesting a channel with
2106	 * dma_request_channel().
2107	 */
2108	if (pdev->dev.of_node) {
2109		err = of_dma_controller_register(pdev->dev.of_node,
2110						 at_dma_xlate, atdma);
2111		if (err) {
2112			dev_err(&pdev->dev, "could not register of_dma_controller\n");
2113			goto err_of_dma_controller_register;
2114		}
2115	}
2116
2117	return 0;
2118
2119err_of_dma_controller_register:
2120	dma_async_device_unregister(&atdma->dma_common);
2121	dma_pool_destroy(atdma->memset_pool);
2122err_memset_pool_create:
2123	dma_pool_destroy(atdma->dma_desc_pool);
2124err_desc_pool_create:
2125	free_irq(platform_get_irq(pdev, 0), atdma);
2126err_irq:
2127	clk_disable_unprepare(atdma->clk);
2128err_clk_prepare:
2129	clk_put(atdma->clk);
2130err_clk:
2131	iounmap(atdma->regs);
2132	atdma->regs = NULL;
2133err_release_r:
2134	release_mem_region(io->start, size);
2135err_kfree:
2136	kfree(atdma);
2137	return err;
2138}
2139
2140static int at_dma_remove(struct platform_device *pdev)
2141{
2142	struct at_dma		*atdma = platform_get_drvdata(pdev);
2143	struct dma_chan		*chan, *_chan;
2144	struct resource		*io;
2145
2146	at_dma_off(atdma);
2147	dma_async_device_unregister(&atdma->dma_common);
2148
2149	dma_pool_destroy(atdma->memset_pool);
2150	dma_pool_destroy(atdma->dma_desc_pool);
2151	free_irq(platform_get_irq(pdev, 0), atdma);
2152
2153	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2154			device_node) {
2155		struct at_dma_chan	*atchan = to_at_dma_chan(chan);
2156
2157		/* Disable interrupts */
2158		atc_disable_chan_irq(atdma, chan->chan_id);
2159
2160		tasklet_kill(&atchan->tasklet);
2161		list_del(&chan->device_node);
2162	}
2163
2164	clk_disable_unprepare(atdma->clk);
2165	clk_put(atdma->clk);
2166
2167	iounmap(atdma->regs);
2168	atdma->regs = NULL;
2169
2170	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2171	release_mem_region(io->start, resource_size(io));
2172
2173	kfree(atdma);
2174
2175	return 0;
2176}
2177
2178static void at_dma_shutdown(struct platform_device *pdev)
2179{
2180	struct at_dma	*atdma = platform_get_drvdata(pdev);
2181
2182	at_dma_off(atdma);
2183	clk_disable_unprepare(atdma->clk);
2184}
2185
2186static int at_dma_prepare(struct device *dev)
2187{
2188	struct platform_device *pdev = to_platform_device(dev);
2189	struct at_dma *atdma = platform_get_drvdata(pdev);
2190	struct dma_chan *chan, *_chan;
2191
2192	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2193			device_node) {
2194		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2195		/* wait for transaction completion (except in cyclic case) */
2196		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
2197			return -EAGAIN;
2198	}
2199	return 0;
2200}
2201
2202static void atc_suspend_cyclic(struct at_dma_chan *atchan)
2203{
2204	struct dma_chan	*chan = &atchan->chan_common;
2205
2206	/* The channel should have been paused by its user;
2207	 * pause it here anyway if that has not been done already. */
2208	if (!atc_chan_is_paused(atchan)) {
2209		dev_warn(chan2dev(chan),
2210		"cyclic channel not paused, should be done by channel user\n");
2211		atc_pause(chan);
2212	}
2213
2214	/* now preserve additional data for cyclic operations */
2215	/* next descriptor address in the cyclic list */
2216	atchan->save_dscr = channel_readl(atchan, DSCR);
2217
2218	vdbg_dump_regs(atchan);
2219}
2220
2221static int at_dma_suspend_noirq(struct device *dev)
2222{
2223	struct platform_device *pdev = to_platform_device(dev);
2224	struct at_dma *atdma = platform_get_drvdata(pdev);
2225	struct dma_chan *chan, *_chan;
2226
2227	/* preserve data */
2228	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2229			device_node) {
2230		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2231
2232		if (atc_chan_is_cyclic(atchan))
2233			atc_suspend_cyclic(atchan);
2234		atchan->save_cfg = channel_readl(atchan, CFG);
2235	}
2236	atdma->save_imr = dma_readl(atdma, EBCIMR);
2237
2238	/* disable DMA controller */
2239	at_dma_off(atdma);
2240	clk_disable_unprepare(atdma->clk);
2241	return 0;
2242}
2243
2244static void atc_resume_cyclic(struct at_dma_chan *atchan)
2245{
2246	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
2247
2248	/* restore channel status for the cyclic descriptor list:
2249	 * resume from the next descriptor in the list at the time of suspend */
2250	channel_writel(atchan, SADDR, 0);
2251	channel_writel(atchan, DADDR, 0);
2252	channel_writel(atchan, CTRLA, 0);
2253	channel_writel(atchan, CTRLB, 0);
2254	channel_writel(atchan, DSCR, atchan->save_dscr);
2255	dma_writel(atdma, CHER, atchan->mask);
2256
2257	/* The channel pause status should be cleared by the channel user;
2258	 * we cannot take the initiative to do it here. */
2259
2260	vdbg_dump_regs(atchan);
2261}
2262
2263static int at_dma_resume_noirq(struct device *dev)
2264{
2265	struct platform_device *pdev = to_platform_device(dev);
2266	struct at_dma *atdma = platform_get_drvdata(pdev);
2267	struct dma_chan *chan, *_chan;
2268
2269	/* bring back DMA controller */
2270	clk_prepare_enable(atdma->clk);
2271	dma_writel(atdma, EN, AT_DMA_ENABLE);
2272
2273	/* clear any pending interrupt */
2274	while (dma_readl(atdma, EBCISR))
2275		cpu_relax();
2276
2277	/* restore saved data */
2278	dma_writel(atdma, EBCIER, atdma->save_imr);
2279	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2280			device_node) {
2281		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2282
2283		channel_writel(atchan, CFG, atchan->save_cfg);
2284		if (atc_chan_is_cyclic(atchan))
2285			atc_resume_cyclic(atchan);
2286	}
2287	return 0;
2288}
2289
2290static const struct dev_pm_ops at_dma_dev_pm_ops = {
2291	.prepare = at_dma_prepare,
2292	.suspend_noirq = at_dma_suspend_noirq,
2293	.resume_noirq = at_dma_resume_noirq,
2294};
2295
2296static struct platform_driver at_dma_driver = {
2297	.remove		= at_dma_remove,
2298	.shutdown	= at_dma_shutdown,
2299	.id_table	= atdma_devtypes,
2300	.driver = {
2301		.name	= "at_hdmac",
2302		.pm	= &at_dma_dev_pm_ops,
2303		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
2304	},
2305};
2306
2307static int __init at_dma_init(void)
2308{
2309	return platform_driver_probe(&at_dma_driver, at_dma_probe);
2310}
2311subsys_initcall(at_dma_init);
2312
2313static void __exit at_dma_exit(void)
2314{
2315	platform_driver_unregister(&at_dma_driver);
2316}
2317module_exit(at_dma_exit);
2318
2319MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2320MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2321MODULE_LICENSE("GPL");
2322MODULE_ALIAS("platform:at_hdmac");
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
   4 *
   5 * Copyright (C) 2008 Atmel Corporation
   6 * Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries
   7 *
   8 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
   9 * The only Atmel DMA Controller that is not covered by this driver is the one
  10 * found on AT91SAM9263.
  11 */
  12
  13#include <dt-bindings/dma/at91.h>
  14#include <linux/bitfield.h>
  15#include <linux/clk.h>
  16#include <linux/dmaengine.h>
  17#include <linux/dmapool.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/interrupt.h>
  20#include <linux/module.h>
  21#include <linux/of.h>
  22#include <linux/overflow.h>
  23#include <linux/of_device.h>
  24#include <linux/of_dma.h>
  25#include <linux/platform_device.h>
  26#include <linux/slab.h>
  27
  28#include "dmaengine.h"
  29#include "virt-dma.h"
  30
  31/*
  32 * Glossary
  33 * --------
  34 *
  35 * at_hdmac		: Name of the ATmel AHB DMA Controller
  36 * at_dma_ / atdma	: ATmel DMA controller entity related
  37 * atc_	/ atchan	: ATmel DMA Channel entity related
  38 */
  39
  40#define	AT_DMA_MAX_NR_CHANNELS	8
  41
  42/* Global Configuration Register */
  43#define AT_DMA_GCFG		0x00
  44#define AT_DMA_IF_BIGEND(i)	BIT((i))	/* AHB-Lite Interface i in Big-endian mode */
  45#define AT_DMA_ARB_CFG		BIT(4)		/* Arbiter mode. */
  46
  47/* Controller Enable Register */
  48#define AT_DMA_EN		0x04
  49#define AT_DMA_ENABLE		BIT(0)
  50
  51/* Software Single Request Register */
  52#define AT_DMA_SREQ		0x08
  53#define AT_DMA_SSREQ(x)		BIT((x) << 1)		/* Request a source single transfer on channel x */
  54#define AT_DMA_DSREQ(x)		BIT(1 + ((x) << 1))	/* Request a destination single transfer on channel x */
  55
  56/* Software Chunk Transfer Request Register */
  57#define AT_DMA_CREQ		0x0c
  58#define AT_DMA_SCREQ(x)		BIT((x) << 1)		/* Request a source chunk transfer on channel x */
  59#define AT_DMA_DCREQ(x)		BIT(1 + ((x) << 1))	/* Request a destination chunk transfer on channel x */
  60
  61/* Software Last Transfer Flag Register */
  62#define AT_DMA_LAST		0x10
  63#define AT_DMA_SLAST(x)		BIT((x) << 1)		/* This src rq is last tx of buffer on channel x */
  64#define AT_DMA_DLAST(x)		BIT(1 + ((x) << 1))	/* This dst rq is last tx of buffer on channel x */
  65
  66/* Request Synchronization Register */
  67#define AT_DMA_SYNC		0x14
  68#define AT_DMA_SYR(h)		BIT((h))		/* Synchronize handshake line h */
  69
  70/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */
  71#define AT_DMA_EBCIER		0x18			/* Enable register */
  72#define AT_DMA_EBCIDR		0x1c			/* Disable register */
  73#define AT_DMA_EBCIMR		0x20			/* Mask Register */
  74#define AT_DMA_EBCISR		0x24			/* Status Register */
  75#define AT_DMA_CBTC_OFFSET	8
  76#define AT_DMA_ERR_OFFSET	16
  77#define AT_DMA_BTC(x)		BIT((x))
  78#define AT_DMA_CBTC(x)		BIT(AT_DMA_CBTC_OFFSET + (x))
  79#define AT_DMA_ERR(x)		BIT(AT_DMA_ERR_OFFSET + (x))
  80
  81/* Channel Handler Enable Register */
  82#define AT_DMA_CHER		0x28
  83#define AT_DMA_ENA(x)		BIT((x))
  84#define AT_DMA_SUSP(x)		BIT(8 + (x))
  85#define AT_DMA_KEEP(x)		BIT(24 + (x))
  86
  87/* Channel Handler Disable Register */
  88#define AT_DMA_CHDR		0x2c
  89#define AT_DMA_DIS(x)		BIT(x)
  90#define AT_DMA_RES(x)		BIT(8 + (x))
  91
  92/* Channel Handler Status Register */
  93#define AT_DMA_CHSR		0x30
  94#define AT_DMA_EMPT(x)		BIT(16 + (x))
  95#define AT_DMA_STAL(x)		BIT(24 + (x))
  96
  97/* Channel registers base address */
  98#define AT_DMA_CH_REGS_BASE	0x3c
  99#define ch_regs(x)		(AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */
 100
 101/* Hardware register offset for each channel */
 102#define ATC_SADDR_OFFSET	0x00	/* Source Address Register */
 103#define ATC_DADDR_OFFSET	0x04	/* Destination Address Register */
 104#define ATC_DSCR_OFFSET		0x08	/* Descriptor Address Register */
 105#define ATC_CTRLA_OFFSET	0x0c	/* Control A Register */
 106#define ATC_CTRLB_OFFSET	0x10	/* Control B Register */
 107#define ATC_CFG_OFFSET		0x14	/* Configuration Register */
 108#define ATC_SPIP_OFFSET		0x18	/* Src PIP Configuration Register */
 109#define ATC_DPIP_OFFSET		0x1c	/* Dst PIP Configuration Register */
 110
 111
 112/* Bitfield definitions */
 113
 114/* Bitfields in DSCR */
 115#define ATC_DSCR_IF		GENMASK(1, 0)	/* Descriptor fetched via AHB-Lite Interface */
 116
 117/* Bitfields in CTRLA */
 118#define ATC_BTSIZE_MAX		GENMASK(15, 0)	/* Maximum Buffer Transfer Size */
 119#define ATC_BTSIZE		GENMASK(15, 0)	/* Buffer Transfer Size */
 120#define ATC_SCSIZE		GENMASK(18, 16)	/* Source Chunk Transfer Size */
 121#define ATC_DCSIZE		GENMASK(22, 20)	/* Destination Chunk Transfer Size */
 122#define ATC_SRC_WIDTH		GENMASK(25, 24)	/* Source Single Transfer Size */
 123#define ATC_DST_WIDTH		GENMASK(29, 28)	/* Destination Single Transfer Size */
 124#define ATC_DONE		BIT(31)	/* Tx Done (only written back in descriptor) */
 125
 126/* Bitfields in CTRLB */
 127#define ATC_SIF			GENMASK(1, 0)	/* Src tx done via AHB-Lite Interface i */
 128#define ATC_DIF			GENMASK(5, 4)	/* Dst tx done via AHB-Lite Interface i */
 129#define AT_DMA_MEM_IF		0x0		/* interface 0 as memory interface */
 130#define AT_DMA_PER_IF		0x1		/* interface 1 as peripheral interface */
 131#define ATC_SRC_PIP		BIT(8)		/* Source Picture-in-Picture enabled */
 132#define ATC_DST_PIP		BIT(12)		/* Destination Picture-in-Picture enabled */
 133#define ATC_SRC_DSCR_DIS	BIT(16)		/* Src Descriptor fetch disable */
 134#define ATC_DST_DSCR_DIS	BIT(20)		/* Dst Descriptor fetch disable */
 135#define ATC_FC			GENMASK(22, 21)	/* Choose Flow Controller */
 136#define ATC_FC_MEM2MEM		0x0		/* Mem-to-Mem (DMA) */
 137#define ATC_FC_MEM2PER		0x1		/* Mem-to-Periph (DMA) */
 138#define ATC_FC_PER2MEM		0x2		/* Periph-to-Mem (DMA) */
 139#define ATC_FC_PER2PER		0x3		/* Periph-to-Periph (DMA) */
 140#define ATC_FC_PER2MEM_PER	0x4		/* Periph-to-Mem (Peripheral) */
 141#define ATC_FC_MEM2PER_PER	0x5		/* Mem-to-Periph (Peripheral) */
 142#define ATC_FC_PER2PER_SRCPER	0x6		/* Periph-to-Periph (Src Peripheral) */
 143#define ATC_FC_PER2PER_DSTPER	0x7		/* Periph-to-Periph (Dst Peripheral) */
 144#define ATC_SRC_ADDR_MODE	GENMASK(25, 24)
 145#define ATC_SRC_ADDR_MODE_INCR	0x0		/* Incrementing Mode */
 146#define ATC_SRC_ADDR_MODE_DECR	0x1		/* Decrementing Mode */
 147#define ATC_SRC_ADDR_MODE_FIXED	0x2		/* Fixed Mode */
 148#define ATC_DST_ADDR_MODE	GENMASK(29, 28)
 149#define ATC_DST_ADDR_MODE_INCR	0x0		/* Incrementing Mode */
 150#define ATC_DST_ADDR_MODE_DECR	0x1		/* Decrementing Mode */
 151#define ATC_DST_ADDR_MODE_FIXED	0x2		/* Fixed Mode */
 152#define ATC_IEN			BIT(30)		/* BTC interrupt enable (active low) */
 153#define ATC_AUTO		BIT(31)		/* Auto multiple buffer tx enable */
 154
 155/* Bitfields in CFG */
 156#define ATC_PER_MSB(h)	((0x30U & (h)) >> 4)	/* Extract most significant bits of a handshaking identifier */
 157
 158#define ATC_SRC_PER		GENMASK(3, 0)	/* Channel src rq associated with periph handshaking ifc h */
 159#define ATC_DST_PER		GENMASK(7, 4)	/* Channel dst rq associated with periph handshaking ifc h */
 160#define ATC_SRC_REP		BIT(8)		/* Source Replay Mode */
 161#define ATC_SRC_H2SEL		BIT(9)		/* Source Handshaking Mode */
 162#define ATC_SRC_PER_MSB		GENMASK(11, 10)	/* Channel src rq (most significant bits) */
 163#define ATC_DST_REP		BIT(12)		/* Destination Replay Mode */
 164#define ATC_DST_H2SEL		BIT(13)		/* Destination Handshaking Mode */
 165#define ATC_DST_PER_MSB		GENMASK(15, 14)	/* Channel dst rq (most significant bits) */
 166#define ATC_SOD			BIT(16)		/* Stop On Done */
 167#define ATC_LOCK_IF		BIT(20)		/* Interface Lock */
 168#define ATC_LOCK_B		BIT(21)		/* AHB Bus Lock */
 169#define ATC_LOCK_IF_L		BIT(22)		/* Master Interface Arbiter Lock */
 170#define ATC_AHB_PROT		GENMASK(26, 24)	/* AHB Protection */
 171#define ATC_FIFOCFG		GENMASK(29, 28)	/* FIFO Request Configuration */
 172#define ATC_FIFOCFG_LARGESTBURST	0x0
 173#define ATC_FIFOCFG_HALFFIFO		0x1
 174#define ATC_FIFOCFG_ENOUGHSPACE		0x2
 175
 176/* Bitfields in SPIP */
 177#define ATC_SPIP_HOLE		GENMASK(15, 0)
 178#define ATC_SPIP_BOUNDARY	GENMASK(25, 16)
 179
 180/* Bitfields in DPIP */
 181#define ATC_DPIP_HOLE		GENMASK(15, 0)
 182#define ATC_DPIP_BOUNDARY	GENMASK(25, 16)
 183
 184#define ATC_SRC_PER_ID(id)	(FIELD_PREP(ATC_SRC_PER_MSB, (id)) |	\
 185				 FIELD_PREP(ATC_SRC_PER, (id)))
 186#define ATC_DST_PER_ID(id)	(FIELD_PREP(ATC_DST_PER_MSB, (id)) |	\
 187				 FIELD_PREP(ATC_DST_PER, (id)))
 188
 189
 190
 191/*--  descriptors  -----------------------------------------------------*/
 192
 193/* LLI == Linked List Item; aka DMA buffer descriptor */
 194struct at_lli {
 195	/* values that are not changed by hardware */
 196	u32 saddr;
 197	u32 daddr;
 198	/* value that may get written back: */
 199	u32 ctrla;
 200	/* more values that are not changed by hardware */
 201	u32 ctrlb;
 202	u32 dscr;	/* chain to next lli */
 203};
 204
 205/**
 206 * struct atdma_sg - atdma scatter gather entry
 207 * @len: length of the current Linked List Item.
 208 * @lli: linked list item that is passed to the DMA controller
 209 * @lli_phys: physical address of the LLI.
 210 */
 211struct atdma_sg {
 212	unsigned int len;
 213	struct at_lli *lli;
 214	dma_addr_t lli_phys;
 215};
 216
 217/**
 218 * struct at_desc - software descriptor
 219 * @vd: pointer to the virtual dma descriptor.
 220 * @atchan: pointer to the atmel dma channel.
 221 * @total_len: total transaction byte count
 222 * @sglen: number of sg entries.
 223 * @sg: array of sgs.
 224 */
 225struct at_desc {
 226	struct				virt_dma_desc vd;
 227	struct				at_dma_chan *atchan;
 228	size_t				total_len;
 229	unsigned int			sglen;
 230	/* Interleaved data */
 231	size_t				boundary;
 232	size_t				dst_hole;
 233	size_t				src_hole;
 234
 235	/* Memset temporary buffer */
 236	bool				memset_buffer;
 237	dma_addr_t			memset_paddr;
 238	int				*memset_vaddr;
 239	struct atdma_sg			sg[];
 240};
 241
 242/*--  Channels  --------------------------------------------------------*/
 243
 244/**
 245 * atc_status - information bits stored in channel status flag
 246 *
 247 * Manipulated with atomic operations.
 248 */
 249enum atc_status {
 250	ATC_IS_PAUSED = 1,
 251	ATC_IS_CYCLIC = 24,
 252};
 253
 254/**
 255 * struct at_dma_chan - internal representation of an Atmel HDMAC channel
 256 * @vc: virtual dma channel entry.
 257 * @atdma: pointer to the driver data.
 258 * @ch_regs: memory mapped register base
 259 * @mask: channel index in a mask
 260 * @per_if: peripheral interface
 261 * @mem_if: memory interface
 262 * @status: transmit status information from irq/prep* functions
 263 *                to tasklet (use atomic operations)
 264 * @save_cfg: configuration register that is saved on suspend/resume cycle
 265 * @save_dscr: for cyclic operations, preserve next descriptor address in
 266 *             the cyclic list on suspend/resume cycle
 267 * @dma_sconfig: configuration for slave transfers, passed via
 268 * .device_config
 269 * @desc: pointer to the atmel dma descriptor.
 270 */
 271struct at_dma_chan {
 272	struct virt_dma_chan	vc;
 273	struct at_dma		*atdma;
 274	void __iomem		*ch_regs;
 275	u8			mask;
 276	u8			per_if;
 277	u8			mem_if;
 278	unsigned long		status;
 279	u32			save_cfg;
 280	u32			save_dscr;
 281	struct dma_slave_config	dma_sconfig;
 282	bool			cyclic;
 283	struct at_desc		*desc;
 284};
 285
 286#define	channel_readl(atchan, name) \
 287	__raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET)
 288
 289#define	channel_writel(atchan, name, val) \
 290	__raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET)
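/*
 * Example expansion (illustrative): channel_readl(atchan, CFG) becomes
 * __raw_readl(atchan->ch_regs + ATC_CFG_OFFSET), i.e. a read of that
 * channel's Configuration Register at offset 0x14.
 */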
 291
 292/*
 293 * Fix sconfig's burst size according to at_hdmac. We need to convert them as:
 294 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7.
 295 *
 296 * This can be done by finding most significant bit set.
 297 */
 298static inline void convert_burst(u32 *maxburst)
 299{
 300	if (*maxburst > 1)
 301		*maxburst = fls(*maxburst) - 2;
 302	else
 303		*maxburst = 0;
 304}
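/*
 * Worked example: a maxburst of 16 gives fls(16) - 2 = 5 - 2 = 3, which
 * matches the 16 -> 3 entry in the table above; maxburst values of 0 or 1
 * map to 0.
 */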
 305
 306/*
 307 * Fix sconfig's bus width according to at_hdmac.
 308 * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2.
 309 */
 310static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
 311{
 312	switch (addr_width) {
 313	case DMA_SLAVE_BUSWIDTH_2_BYTES:
 314		return 1;
 315	case DMA_SLAVE_BUSWIDTH_4_BYTES:
 316		return 2;
 317	default:
 318		/* For 1 byte width or fallback */
 319		return 0;
 320	}
 321}
 322
 323/*--  Controller  ------------------------------------------------------*/
 324
 325/**
 326 * struct at_dma - internal representation of an Atmel HDMA Controller
 327 * @dma_device: dmaengine dma_device object members
 328 * @regs: memory mapped register base
 329 * @clk: dma controller clock
 330 * @save_imr: interrupt mask register that is saved on suspend/resume cycle
 331 * @all_chan_mask: all channels available in a mask
 332 * @lli_pool: hw lli table
 333 * @memset_pool: pool of scratch memory used as the source for memset operations
 334 * @chan: channels table to store at_dma_chan structures
 335 */
 336struct at_dma {
 337	struct dma_device	dma_device;
 338	void __iomem		*regs;
 339	struct clk		*clk;
 340	u32			save_imr;
 341
 342	u8			all_chan_mask;
 343
 344	struct dma_pool		*lli_pool;
 345	struct dma_pool		*memset_pool;
 346	/* AT THE END channels table */
 347	struct at_dma_chan	chan[];
 348};
 349
 350#define	dma_readl(atdma, name) \
 351	__raw_readl((atdma)->regs + AT_DMA_##name)
 352#define	dma_writel(atdma, name, val) \
 353	__raw_writel((val), (atdma)->regs + AT_DMA_##name)
 354
 355static inline struct at_desc *to_atdma_desc(struct dma_async_tx_descriptor *t)
 356{
 357	return container_of(t, struct at_desc, vd.tx);
 358}
 359
 360static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *chan)
 361{
 362	return container_of(chan, struct at_dma_chan, vc.chan);
 363}
 364
 365static inline struct at_dma *to_at_dma(struct dma_device *ddev)
 366{
 367	return container_of(ddev, struct at_dma, dma_device);
 368}
 369
 370
 371/*--  Helper functions  ------------------------------------------------*/
 372
 373static struct device *chan2dev(struct dma_chan *chan)
 374{
 375	return &chan->dev->device;
 376}
 377
 378#if defined(VERBOSE_DEBUG)
 379static void vdbg_dump_regs(struct at_dma_chan *atchan)
 380{
 381	struct at_dma	*atdma = to_at_dma(atchan->vc.chan.device);
 382
 383	dev_err(chan2dev(&atchan->vc.chan),
 384		"  channel %d : imr = 0x%x, chsr = 0x%x\n",
 385		atchan->vc.chan.chan_id,
 386		dma_readl(atdma, EBCIMR),
 387		dma_readl(atdma, CHSR));
 388
 389	dev_err(chan2dev(&atchan->vc.chan),
 390		"  channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n",
 391		channel_readl(atchan, SADDR),
 392		channel_readl(atchan, DADDR),
 393		channel_readl(atchan, CTRLA),
 394		channel_readl(atchan, CTRLB),
 395		channel_readl(atchan, CFG),
 396		channel_readl(atchan, DSCR));
 397}
 398#else
 399static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
 400#endif
 401
 402static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
 403{
 404	dev_crit(chan2dev(&atchan->vc.chan),
 405		 "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n",
 406		 &lli->saddr, &lli->daddr,
 407		 lli->ctrla, lli->ctrlb, &lli->dscr);
 408}
 409
 410
 411static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
 412{
 413	u32 ebci;
 414
 415	/* enable interrupts on buffer transfer completion & error */
 416	ebci =    AT_DMA_BTC(chan_id)
 417		| AT_DMA_ERR(chan_id);
 418	if (on)
 419		dma_writel(atdma, EBCIER, ebci);
 420	else
 421		dma_writel(atdma, EBCIDR, ebci);
 422}
 423
 424static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
 425{
 426	atc_setup_irq(atdma, chan_id, 1);
 427}
 428
 429static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
 430{
 431	atc_setup_irq(atdma, chan_id, 0);
 432}
 433
 434
 435/**
 436 * atc_chan_is_enabled - test if given channel is enabled
 437 * @atchan: channel we want to test status
 438 */
 439static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
 440{
 441	struct at_dma *atdma = to_at_dma(atchan->vc.chan.device);
 442
 443	return !!(dma_readl(atdma, CHSR) & atchan->mask);
 444}
 445
 446/**
 447 * atc_chan_is_paused - test channel pause/resume status
 448 * @atchan: channel we want to test status
 449 */
 450static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
 451{
 452	return test_bit(ATC_IS_PAUSED, &atchan->status);
 453}
 454
 455/**
 456 * atc_chan_is_cyclic - test if given channel has cyclic property set
 457 * @atchan: channel we want to test status
 458 */
 459static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
 460{
 461	return test_bit(ATC_IS_CYCLIC, &atchan->status);
 462}
 463
 464/**
 465 * set_lli_eol - set end-of-link to descriptor so it will end transfer
 466 * @desc: descriptor, single or at the end of a chain, to end the chain on
 467 * @i: index of the atmel scatter gather entry that is at the end of the chain.
 468 */
 469static void set_lli_eol(struct at_desc *desc, unsigned int i)
 470{
 471	u32 ctrlb = desc->sg[i].lli->ctrlb;
 472
 473	ctrlb &= ~ATC_IEN;
 474	ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
 475
 476	desc->sg[i].lli->ctrlb = ctrlb;
 477	desc->sg[i].lli->dscr = 0;
 478}
 479
 480#define	ATC_DEFAULT_CFG		FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO)
 481#define	ATC_DEFAULT_CTRLB	(FIELD_PREP(ATC_SIF, AT_DMA_MEM_IF) | \
 482				 FIELD_PREP(ATC_DIF, AT_DMA_MEM_IF))
 483#define ATC_DMA_BUSWIDTHS\
 484	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
 485	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
 486	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
 487	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 488
 489#define ATC_MAX_DSCR_TRIALS	10
 490
 491/*
 492 * Initial number of descriptors to allocate for each channel. This could
 493 * be increased during dma usage.
 494 */
 495static unsigned int init_nr_desc_per_channel = 64;
 496module_param(init_nr_desc_per_channel, uint, 0644);
 497MODULE_PARM_DESC(init_nr_desc_per_channel,
 498		 "initial descriptors per channel (default: 64)");
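/*
 * Illustrative usage (an assumption about deployment, not part of this
 * file): the pool size can be raised at module load time with
 *	modprobe at_hdmac init_nr_desc_per_channel=128
 * or, when the driver is built in, with the kernel command line parameter
 *	at_hdmac.init_nr_desc_per_channel=128
 */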
 499
 500/**
 501 * struct at_dma_platform_data - Controller configuration parameters
 502 * @nr_channels: Number of channels supported by hardware (max 8)
 503 * @cap_mask: dma_capability flags supported by the platform
 504 */
 505struct at_dma_platform_data {
 506	unsigned int	nr_channels;
 507	dma_cap_mask_t  cap_mask;
 508};
 509
 510/**
 511 * struct at_dma_slave - Controller-specific information about a slave
 512 * @dma_dev: required DMA master device
 513 * @cfg: Platform-specific initializer for the CFG register
 514 */
 515struct at_dma_slave {
 516	struct device		*dma_dev;
 517	u32			cfg;
 518};
 519
 520static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
 521						size_t len)
 522{
 523	unsigned int width;
 524
 525	if (!((src | dst  | len) & 3))
 526		width = 2;
 527	else if (!((src | dst | len) & 1))
 528		width = 1;
 529	else
 530		width = 0;
 531
 532	return width;
 533}
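/*
 * Worked example: src = 0x1000, dst = 0x2004, len = 8 -- the OR of all
 * three has bits 1:0 clear, so the width is 2 (32-bit transfers).  With
 * src = 0x1002 instead, bit 1 would be set and the width drops to 1
 * (16-bit transfers).
 */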
 534
 535static void atdma_lli_chain(struct at_desc *desc, unsigned int i)
 536{
 537	struct atdma_sg *atdma_sg = &desc->sg[i];
 538
 539	if (i)
 540		desc->sg[i - 1].lli->dscr = atdma_sg->lli_phys;
 541}
 542
 543/**
 544 * atc_dostart - starts the DMA engine for real
 545 * @atchan: the channel we want to start
 546 */
 547static void atc_dostart(struct at_dma_chan *atchan)
 548{
 549	struct virt_dma_desc *vd = vchan_next_desc(&atchan->vc);
 550	struct at_desc *desc;
 551
 552	if (!vd) {
 553		atchan->desc = NULL;
 554		return;
 555	}
 556
 557	vdbg_dump_regs(atchan);
 558
 559	list_del(&vd->node);
 560	atchan->desc = desc = to_atdma_desc(&vd->tx);
 561
 562	channel_writel(atchan, SADDR, 0);
 563	channel_writel(atchan, DADDR, 0);
 564	channel_writel(atchan, CTRLA, 0);
 565	channel_writel(atchan, CTRLB, 0);
 566	channel_writel(atchan, DSCR, desc->sg[0].lli_phys);
 567	channel_writel(atchan, SPIP,
 568		       FIELD_PREP(ATC_SPIP_HOLE, desc->src_hole) |
 569		       FIELD_PREP(ATC_SPIP_BOUNDARY, desc->boundary));
 570	channel_writel(atchan, DPIP,
 571		       FIELD_PREP(ATC_DPIP_HOLE, desc->dst_hole) |
 572		       FIELD_PREP(ATC_DPIP_BOUNDARY, desc->boundary));
 573
 574	/* Don't allow CPU to reorder channel enable. */
 575	wmb();
 576	dma_writel(atchan->atdma, CHER, atchan->mask);
 577
 578	vdbg_dump_regs(atchan);
 579}
 580
 581static void atdma_desc_free(struct virt_dma_desc *vd)
 582{
 583	struct at_dma *atdma = to_at_dma(vd->tx.chan->device);
 584	struct at_desc *desc = to_atdma_desc(&vd->tx);
 585	unsigned int i;
 586
 587	for (i = 0; i < desc->sglen; i++) {
 588		if (desc->sg[i].lli)
 589			dma_pool_free(atdma->lli_pool, desc->sg[i].lli,
 590				      desc->sg[i].lli_phys);
 591	}
 592
 593	/* If the transfer was a memset, free our temporary buffer */
 594	if (desc->memset_buffer) {
 595		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
 596			      desc->memset_paddr);
 597		desc->memset_buffer = false;
 598	}
 599
 600	kfree(desc);
 601}
 602
 603/**
 604 * atc_calc_bytes_left - calculates the number of bytes left according to the
 605 * value read from CTRLA.
 606 *
 607 * @current_len: the number of bytes left before reading CTRLA
 608 * @ctrla: the value of CTRLA
 609 */
 610static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
 611{
 612	u32 btsize = FIELD_GET(ATC_BTSIZE, ctrla);
 613	u32 src_width = FIELD_GET(ATC_SRC_WIDTH, ctrla);
 614
 615	/*
 616	 * According to the datasheet, when reading the Control A Register
 617	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
 618	 * number of transfers completed on the Source Interface.
 619	 * So btsize is always a number of source width transfers.
 620	 */
 621	return current_len - (btsize << src_width);
 622}
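/*
 * Worked example: with current_len = 4096 and a CTRLA value where
 * BTSIZE = 512 and SRC_WIDTH = 1 (half-word transfers), 512 << 1 = 1024
 * bytes have been read from the source, leaving 4096 - 1024 = 3072 bytes.
 */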
 623
 624/**
 625 * atc_get_llis_residue - Get residue for a hardware linked list transfer
 626 *
 627 * Calculate the residue by removing the length of the Linked List Item (LLI)
 628 * already transferred from the total length. To get the current LLI we can use
 629 * the value of the channel's DSCR register and compare it against the DSCR
 630 * value of each LLI.
 631 *
 632 * The CTRLA register provides us with the amount of data already read from the
 633 * source for the LLI. So we can compute a more accurate residue by also
 634 * removing the number of bytes corresponding to this amount of data.
 635 *
 636 * However, the DSCR and CTRLA registers cannot both be read atomically. Hence
 637 * a race condition may occur: the first register read may refer to one LLI
 638 * whereas the second read may refer to a later LLI in the list because of the
 639 * DMA transfer progression in between the two reads.
 640 *
 641 * One solution could have been to pause the DMA transfer, read the DSCR and
 642 * CTRLA then resume the DMA transfer. Nonetheless, this approach presents some
 643 * drawbacks:
 644 * - If the DMA transfer is paused, RX overruns or TX underruns are more likely
 645 *   to occur depending on the system latency. Taking the USART driver as an
 646 *   example, it uses a cyclic DMA transfer to read data from the Receive
 647 *   Holding Register (RHR) to avoid RX overruns since the RHR is not protected
 648 *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer to compute the
 649 *   residue would break the USART driver design.
 650 * - The atc_pause() function masks interrupts, but we'd rather avoid doing so
 651 *   for system latency purposes.
 652 *
 653 * We'd rather use another solution: the DSCR is read a first time, then
 654 * the CTRLA is read in turn, and finally the DSCR is read a second time. If
 655 * the two consecutive DSCR values are the same, we assume that both refer
 656 * to the very same LLI, and that the CTRLA value read in between does as
 657 * well. For cyclic transfers, the assumption is that a full loop is "not so
 658 * fast". If the two DSCR values differ, we read the CTRLA then the DSCR
 659 * again, until two consecutive DSCR values are equal or the maximum number
 660 * of trials is reached. This algorithm is very unlikely not to find a stable DSCR value.
 661 * @atchan: pointer to an atmel hdmac channel.
 662 * @desc: pointer to the descriptor for which the residue is calculated.
 663 * @residue: residue to be set to dma_tx_state.
 664 * Returns 0 on success, -errno otherwise.
 665 */
 666static int atc_get_llis_residue(struct at_dma_chan *atchan,
 667				struct at_desc *desc, u32 *residue)
 668{
 669	u32 len, ctrla, dscr;
 670	unsigned int i;
 671
 672	len = desc->total_len;
 673	dscr = channel_readl(atchan, DSCR);
 674	rmb(); /* ensure DSCR is read before CTRLA */
 675	ctrla = channel_readl(atchan, CTRLA);
 676	for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
 677		u32 new_dscr;
 678
 679		rmb(); /* ensure DSCR is read after CTRLA */
 680		new_dscr = channel_readl(atchan, DSCR);
 681
 682		/*
 683		 * If the DSCR register value has not changed inside the DMA
 684		 * controller since the previous read, we assume that both the
 685		 * dscr and ctrla values refer to the very same descriptor.
 686		 */
 687		if (likely(new_dscr == dscr))
 688			break;
 689
 690		/*
 691		 * DSCR has changed inside the DMA controller, so the previously
 692		 * read value of CTRLA may refer to an already processed
 693		 * descriptor hence could be outdated. We need to update ctrla
 694		 * to match the current descriptor.
 695		 */
 696		dscr = new_dscr;
 697		rmb(); /* ensure DSCR is read before CTRLA */
 698		ctrla = channel_readl(atchan, CTRLA);
 699	}
 700	if (unlikely(i == ATC_MAX_DSCR_TRIALS))
 701		return -ETIMEDOUT;
 702
 703	/* For the first descriptor we can be more accurate. */
 704	if (desc->sg[0].lli->dscr == dscr) {
 705		*residue = atc_calc_bytes_left(len, ctrla);
 706		return 0;
 707	}
 708	len -= desc->sg[0].len;
 709
 710	for (i = 1; i < desc->sglen; i++) {
 711		if (desc->sg[i].lli && desc->sg[i].lli->dscr == dscr)
 712			break;
 713		len -= desc->sg[i].len;
 714	}
 715
 716	/*
 717	 * For the current LLI in the chain we can calculate the remaining bytes
 718	 * using the channel's CTRLA register.
 719	 */
 720	*residue = atc_calc_bytes_left(len, ctrla);
 721	return 0;
 722
 723}
 724
 725/**
 726 * atc_get_residue - get the residue in bytes for a given cookie.
 727 * The residue is passed by address and updated on success.
 728 * @chan: DMA channel
 729 * @cookie: transaction identifier to check status of
 730 * @residue: residue to be updated.
 731 * Return 0 on success, -errno otherwise.
 732 */
 733static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
 734			   u32 *residue)
 735{
 736	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 737	struct virt_dma_desc *vd;
 738	struct at_desc *desc = NULL;
 739	u32 len, ctrla;
 740
 741	vd = vchan_find_desc(&atchan->vc, cookie);
 742	if (vd)
 743		desc = to_atdma_desc(&vd->tx);
 744	else if (atchan->desc && atchan->desc->vd.tx.cookie == cookie)
 745		desc = atchan->desc;
 746
 747	if (!desc)
 748		return -EINVAL;
 749
 750	if (desc->sg[0].lli->dscr)
 751		/* hardware linked list transfer */
 752		return atc_get_llis_residue(atchan, desc, residue);
 753
 754	/* single transfer */
 755	len = desc->total_len;
 756	ctrla = channel_readl(atchan, CTRLA);
 757	*residue = atc_calc_bytes_left(len, ctrla);
 758	return 0;
 759}
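/*
 * Illustrative sketch (an assumption, not this file's implementation): a
 * device_tx_status hook would typically combine the generic cookie check
 * with the residue computed above, roughly as follows:
 *
 *	static enum dma_status
 *	atc_tx_status_sketch(struct dma_chan *chan, dma_cookie_t cookie,
 *			     struct dma_tx_state *txstate)
 *	{
 *		enum dma_status status = dma_cookie_status(chan, cookie, txstate);
 *		u32 residue;
 *
 *		if (status == DMA_COMPLETE || !txstate)
 *			return status;
 *		if (!atc_get_residue(chan, cookie, &residue))
 *			dma_set_residue(txstate, residue);
 *		return status;
 *	}
 */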
 760
 761/**
 762 * atc_handle_error - handle errors reported by DMA controller
 763 * @atchan: channel where error occurs.
 764 * @i: channel index
 765 */
 766static void atc_handle_error(struct at_dma_chan *atchan, unsigned int i)
 767{
 768	struct at_desc *desc = atchan->desc;
 769
 770	/* Disable channel on AHB error */
 771	dma_writel(atchan->atdma, CHDR, AT_DMA_RES(i) | atchan->mask);
 772
 773	/*
 774	 * KERN_CRITICAL may seem harsh, but since this only happens
 775	 * when someone submits a bad physical address in a
 776	 * descriptor, we should consider ourselves lucky that the
 777	 * controller flagged an error instead of scribbling over
 778	 * random memory locations.
 779	 */
 780	dev_crit(chan2dev(&atchan->vc.chan), "Bad descriptor submitted for DMA!\n");
 781	dev_crit(chan2dev(&atchan->vc.chan), "cookie: %d\n",
 782		 desc->vd.tx.cookie);
 783	for (i = 0; i < desc->sglen; i++)
 784		atc_dump_lli(atchan, desc->sg[i].lli);
 785}
 786
 787static void atdma_handle_chan_done(struct at_dma_chan *atchan, u32 pending,
 788				   unsigned int i)
 789{
 790	struct at_desc *desc;
 791
 792	spin_lock(&atchan->vc.lock);
 793	desc = atchan->desc;
 794
 795	if (desc) {
 796		if (pending & AT_DMA_ERR(i)) {
 797			atc_handle_error(atchan, i);
 798			/* Pretend the descriptor completed successfully */
 799		}
 800
 801		if (atc_chan_is_cyclic(atchan)) {
 802			vchan_cyclic_callback(&desc->vd);
 803		} else {
 804			vchan_cookie_complete(&desc->vd);
 805			atchan->desc = NULL;
 806			if (!(atc_chan_is_enabled(atchan)))
 807				atc_dostart(atchan);
 808		}
 809	}
 810	spin_unlock(&atchan->vc.lock);
 811}
 812
 813static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 814{
 815	struct at_dma		*atdma = dev_id;
 816	struct at_dma_chan	*atchan;
 817	int			i;
 818	u32			status, pending, imr;
 819	int			ret = IRQ_NONE;
 820
 821	do {
 822		imr = dma_readl(atdma, EBCIMR);
 823		status = dma_readl(atdma, EBCISR);
 824		pending = status & imr;
 825
 826		if (!pending)
 827			break;
 828
 829		dev_vdbg(atdma->dma_device.dev,
 830			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
 831			 status, imr, pending);
 832
 833		for (i = 0; i < atdma->dma_device.chancnt; i++) {
 834			atchan = &atdma->chan[i];
 835			if (!(pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))))
 836				continue;
 837			atdma_handle_chan_done(atchan, pending, i);
 838			ret = IRQ_HANDLED;
 839		}
 840
 841	} while (pending);
 842
 843	return ret;
 844}
 845
 846/*--  DMA Engine API  --------------------------------------------------*/
 847/**
 848 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 849 * @chan: the channel to prepare operation on
 850 * @xt: Interleaved transfer template
 851 * @flags: tx descriptor status flags
 852 */
 853static struct dma_async_tx_descriptor *
 854atc_prep_dma_interleaved(struct dma_chan *chan,
 855			 struct dma_interleaved_template *xt,
 856			 unsigned long flags)
 857{
 858	struct at_dma		*atdma = to_at_dma(chan->device);
 859	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 860	struct data_chunk	*first;
 861	struct atdma_sg		*atdma_sg;
 862	struct at_desc		*desc;
 863	struct at_lli		*lli;
 864	size_t			xfer_count;
 865	unsigned int		dwidth;
 866	u32			ctrla;
 867	u32			ctrlb;
 868	size_t			len = 0;
 869	int			i;
 870
 871	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
 872		return NULL;
 873
 874	first = xt->sgl;
 875
 876	dev_info(chan2dev(chan),
 877		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
 878		__func__, &xt->src_start, &xt->dst_start, xt->numf,
 879		xt->frame_size, flags);
 880
 881	/*
 882	 * The controller can only "skip" X bytes every Y bytes, so we
 883	 * need to make sure we are given a template that fits that
 884	 * description, i.e. a template with chunks that always have the
 885	 * same size and the same ICGs.
 886	 */
 887	for (i = 0; i < xt->frame_size; i++) {
 888		struct data_chunk *chunk = xt->sgl + i;
 889
 890		if ((chunk->size != xt->sgl->size) ||
 891		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
 892		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
 893			dev_err(chan2dev(chan),
 894				"%s: the controller can transfer only identical chunks\n",
 895				__func__);
 896			return NULL;
 897		}
 898
 899		len += chunk->size;
 900	}
 901
 902	dwidth = atc_get_xfer_width(xt->src_start, xt->dst_start, len);
 903
 904	xfer_count = len >> dwidth;
 905	if (xfer_count > ATC_BTSIZE_MAX) {
 906		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
 907		return NULL;
 908	}
 909
 910	ctrla = FIELD_PREP(ATC_SRC_WIDTH, dwidth) |
 911		FIELD_PREP(ATC_DST_WIDTH, dwidth);
 912
 913	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
 914		FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) |
 915		FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) |
 916		ATC_SRC_PIP | ATC_DST_PIP |
 917		FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM);
 918
 919	desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC);
 920	if (!desc)
 921		return NULL;
 922	desc->sglen = 1;
 923
 924	atdma_sg = desc->sg;
 925	atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT,
 926				       &atdma_sg->lli_phys);
 927	if (!atdma_sg->lli) {
 928		kfree(desc);
 929		return NULL;
 930	}
 931	lli = atdma_sg->lli;
 932
 933	lli->saddr = xt->src_start;
 934	lli->daddr = xt->dst_start;
 935	lli->ctrla = ctrla | xfer_count;
 936	lli->ctrlb = ctrlb;
 937
 938	desc->boundary = first->size >> dwidth;
 939	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
 940	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
 941
 942	atdma_sg->len = len;
 943	desc->total_len = len;
 944
 945	set_lli_eol(desc, 0);
 946	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);
 947}
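/*
 * Illustrative template (values are assumptions for demonstration): copying
 * one frame of two 64-byte chunks, each followed by a 16-byte gap, satisfies
 * the identical-chunk check above.  A client could build it like this before
 * calling dmaengine_prep_interleaved_dma():
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 2), GFP_KERNEL);
 *	xt->src_start = src_phys;
 *	xt->dst_start = dst_phys;
 *	xt->dir = DMA_MEM_TO_MEM;
 *	xt->src_inc = true;
 *	xt->dst_inc = true;
 *	xt->numf = 1;
 *	xt->frame_size = 2;
 *	xt->sgl[0].size = 64;
 *	xt->sgl[0].icg = 16;
 *	xt->sgl[1].size = 64;
 *	xt->sgl[1].icg = 16;
 */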
 948
 949/**
 950 * atc_prep_dma_memcpy - prepare a memcpy operation
 951 * @chan: the channel to prepare operation on
 952 * @dest: operation virtual destination address
 953 * @src: operation virtual source address
 954 * @len: operation length
 955 * @flags: tx descriptor status flags
 956 */
 957static struct dma_async_tx_descriptor *
 958atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 959		size_t len, unsigned long flags)
 960{
 961	struct at_dma		*atdma = to_at_dma(chan->device);
 962	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 963	struct at_desc		*desc = NULL;
 964	size_t			xfer_count;
 965	size_t			offset;
 966	size_t			sg_len;
 967	unsigned int		src_width;
 968	unsigned int		dst_width;
 969	unsigned int		i;
 970	u32			ctrla;
 971	u32			ctrlb;
 972
 973	dev_dbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
 974		&dest, &src, len, flags);
 975
 976	if (unlikely(!len)) {
 977		dev_err(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
 978		return NULL;
 979	}
 980
 981	sg_len = DIV_ROUND_UP(len, ATC_BTSIZE_MAX);
 982	desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
 983	if (!desc)
 984		return NULL;
 985	desc->sglen = sg_len;
 986
 987	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
 988		FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) |
 989		FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) |
 990		FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM);
 991
 992	/*
 993	 * We can be a lot more clever here, but this should take care
 994	 * of the most common optimization.
 995	 */
 996	src_width = dst_width = atc_get_xfer_width(src, dest, len);
 997
 998	ctrla = FIELD_PREP(ATC_SRC_WIDTH, src_width) |
 999		FIELD_PREP(ATC_DST_WIDTH, dst_width);
1000
1001	for (offset = 0, i = 0; offset < len;
1002	     offset += xfer_count << src_width, i++) {
1003		struct atdma_sg *atdma_sg = &desc->sg[i];
1004		struct at_lli *lli;
1005
1006		atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT,
1007					       &atdma_sg->lli_phys);
1008		if (!atdma_sg->lli)
1009			goto err_desc_get;
1010		lli = atdma_sg->lli;
1011
1012		xfer_count = min_t(size_t, (len - offset) >> src_width,
1013				   ATC_BTSIZE_MAX);
1014
1015		lli->saddr = src + offset;
1016		lli->daddr = dest + offset;
1017		lli->ctrla = ctrla | xfer_count;
1018		lli->ctrlb = ctrlb;
1019
1020		desc->sg[i].len = xfer_count << src_width;
1021
1022		atdma_lli_chain(desc, i);
1023	}
1024
1025	desc->total_len = len;
1026
1027	/* set end-of-link to the last link descriptor of list*/
1028	set_lli_eol(desc, i - 1);
1029
1030	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);
1031
1032err_desc_get:
1033	atdma_desc_free(&desc->vd);
1034	return NULL;
1035}
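/*
 * Illustrative client-side usage (a sketch, not part of this file): the
 * prep routine above is reached through the generic dmaengine wrappers,
 * assuming "src" and "dst" are DMA addresses already mapped by the caller:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */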
1036
1037static int atdma_create_memset_lli(struct dma_chan *chan,
1038				   struct atdma_sg *atdma_sg,
1039				   dma_addr_t psrc, dma_addr_t pdst, size_t len)
1040{
1041	struct at_dma *atdma = to_at_dma(chan->device);
1042	struct at_lli *lli;
1043	size_t xfer_count;
1044	u32 ctrla = FIELD_PREP(ATC_SRC_WIDTH, 2) | FIELD_PREP(ATC_DST_WIDTH, 2);
1045	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
1046		    FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_FIXED) |
1047		    FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) |
1048		    FIELD_PREP(ATC_FC, ATC_FC_MEM2MEM);
1049
1050	xfer_count = len >> 2;
1051	if (xfer_count > ATC_BTSIZE_MAX) {
1052		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
1053		return -EINVAL;
1054	}
1055
1056	atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_NOWAIT,
1057				       &atdma_sg->lli_phys);
1058	if (!atdma_sg->lli)
1059		return -ENOMEM;
1060	lli = atdma_sg->lli;
1061
1062	lli->saddr = psrc;
1063	lli->daddr = pdst;
1064	lli->ctrla = ctrla | xfer_count;
1065	lli->ctrlb = ctrlb;
1066
1067	atdma_sg->len = len;
1068
1069	return 0;
1070}
1071
1072/**
1073 * atc_prep_dma_memset - prepare a memset operation
1074 * @chan: the channel to prepare operation on
1075 * @dest: operation virtual destination address
1076 * @value: value to set memory buffer to
1077 * @len: operation length
1078 * @flags: tx descriptor status flags
1079 */
1080static struct dma_async_tx_descriptor *
1081atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1082		    size_t len, unsigned long flags)
1083{
1084	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1085	struct at_dma		*atdma = to_at_dma(chan->device);
1086	struct at_desc		*desc;
1087	void __iomem		*vaddr;
1088	dma_addr_t		paddr;
1089	char			fill_pattern;
1090	int			ret;
1091
1092	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
1093		&dest, value, len, flags);
1094
1095	if (unlikely(!len)) {
1096		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
1097		return NULL;
1098	}
1099
1100	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1101		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
1102			__func__);
1103		return NULL;
1104	}
1105
1106	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
1107	if (!vaddr) {
1108		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
1109			__func__);
1110		return NULL;
1111	}
1112
1113	/* Only the first byte of value is to be used according to dmaengine */
1114	fill_pattern = (char)value;
1115
1116	*(u32 *)vaddr = (fill_pattern << 24) |
1117		       (fill_pattern << 16) |
1118		       (fill_pattern << 8) |
1119		       fill_pattern;
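	/*
	 * e.g. (illustrative): value = 0x12345678 keeps only 0x78, and the
	 * word written to the pool buffer becomes 0x78787878.
	 */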
1120
1121	desc = kzalloc(struct_size(desc, sg, 1), GFP_ATOMIC);
1122	if (!desc)
1123		goto err_free_buffer;
1124	desc->sglen = 1;
1125
1126	ret = atdma_create_memset_lli(chan, desc->sg, paddr, dest, len);
1127	if (ret)
1128		goto err_free_desc;
1129
1130	desc->memset_paddr = paddr;
1131	desc->memset_vaddr = vaddr;
1132	desc->memset_buffer = true;
1133
1134	desc->total_len = len;
1135
1136	/* set end-of-link on the descriptor */
1137	set_lli_eol(desc, 0);
1138
1139	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);
1140
1141err_free_desc:
1142	kfree(desc);
1143err_free_buffer:
1144	dma_pool_free(atdma->memset_pool, vaddr, paddr);
1145	return NULL;
1146}
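
/*
 * A minimal client-side sketch of reaching this prep routine through the
 * generic dmaengine API (illustration, not part of this driver; "chan" is
 * assumed to come from dma_request_chan() and "dst" to be a DMA-mapped,
 * 4-byte-aligned buffer):
 *
 *	#include <linux/dmaengine.h>
 *
 *	static int example_fill(struct dma_chan *chan, dma_addr_t dst,
 *				size_t len)
 *	{
 *		struct dma_async_tx_descriptor *tx;
 *
 *		tx = dmaengine_prep_dma_memset(chan, dst, 0xa5, len,
 *					       DMA_PREP_INTERRUPT);
 *		if (!tx)
 *			return -ENOMEM;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		return 0;
 *	}
 */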
1147
1148static struct dma_async_tx_descriptor *
1149atc_prep_dma_memset_sg(struct dma_chan *chan,
1150		       struct scatterlist *sgl,
1151		       unsigned int sg_len, int value,
1152		       unsigned long flags)
1153{
1154	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1155	struct at_dma		*atdma = to_at_dma(chan->device);
1156	struct at_desc		*desc;
1157	struct scatterlist	*sg;
1158	void __iomem		*vaddr;
1159	dma_addr_t		paddr;
1160	size_t			total_len = 0;
1161	int			i;
1162	int			ret;
1163
1164	dev_vdbg(chan2dev(chan), "%s: v0x%x n%u f0x%lx\n", __func__,
1165		 value, sg_len, flags);
1166
1167	if (unlikely(!sgl || !sg_len)) {
1168		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
1169			__func__);
1170		return NULL;
1171	}
1172
1173	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
1174	if (!vaddr) {
1175		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
1176			__func__);
1177		return NULL;
1178	}
1179	*(u32 *)vaddr = (u8)value * 0x01010101U; /* replicate the fill byte, as dmaengine only uses the first byte */
1180
1181	desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
1182	if (!desc)
1183		goto err_free_dma_buf;
1184	desc->sglen = sg_len;
1185
1186	for_each_sg(sgl, sg, sg_len, i) {
1187		dma_addr_t dest = sg_dma_address(sg);
1188		size_t len = sg_dma_len(sg);
1189
1190		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1191			 __func__, &dest, len);
1192
1193		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1194			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
1195				__func__);
1196			goto err_free_desc;
1197		}
1198
1199		ret = atdma_create_memset_lli(chan, &desc->sg[i], paddr, dest,
1200					      len);
1201		if (ret)
1202			goto err_free_desc;
1203
1204		atdma_lli_chain(desc, i);
1205		total_len += len;
1206	}
1207
1208	desc->memset_paddr = paddr;
1209	desc->memset_vaddr = vaddr;
1210	desc->memset_buffer = true;
1211
1212	desc->total_len = total_len;
1213
1214	/* set end-of-link on the descriptor */
1215	set_lli_eol(desc, i - 1);
1216
1217	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);
1218
1219err_free_desc:
1220	atdma_desc_free(&desc->vd);
1221err_free_dma_buf:
1222	dma_pool_free(atdma->memset_pool, vaddr, paddr);
1223	return NULL;
1224}
1225
1226/**
1227 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1228 * @chan: DMA channel
1229 * @sgl: scatterlist to transfer to/from
1230 * @sg_len: number of entries in @sgl
1231 * @direction: DMA direction
1232 * @flags: tx descriptor status flags
1233 * @context: transaction context (ignored)
1234 */
1235static struct dma_async_tx_descriptor *
1236atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1237		unsigned int sg_len, enum dma_transfer_direction direction,
1238		unsigned long flags, void *context)
1239{
1240	struct at_dma		*atdma = to_at_dma(chan->device);
1241	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1242	struct at_dma_slave	*atslave = chan->private;
1243	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
1244	struct at_desc		*desc;
1245	u32			ctrla;
1246	u32			ctrlb;
1247	dma_addr_t		reg;
1248	unsigned int		reg_width;
1249	unsigned int		mem_width;
1250	unsigned int		i;
1251	struct scatterlist	*sg;
1252	size_t			total_len = 0;
1253
1254	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
1255			sg_len,
1256			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1257			flags);
1258
1259	if (unlikely(!atslave || !sg_len)) {
1260		dev_dbg(chan2dev(chan), "prep_slave_sg: no slave config or zero sg length!\n");
1261		return NULL;
1262	}
1263
1264	desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
1265	if (!desc)
1266		return NULL;
1267	desc->sglen = sg_len;
1268
1269	ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) |
1270		FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst);
1271	ctrlb = ATC_IEN;
1272
1273	switch (direction) {
1274	case DMA_MEM_TO_DEV:
1275		reg_width = convert_buswidth(sconfig->dst_addr_width);
1276		ctrla |= FIELD_PREP(ATC_DST_WIDTH, reg_width);
1277		ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE,
1278				    ATC_DST_ADDR_MODE_FIXED) |
1279			 FIELD_PREP(ATC_SRC_ADDR_MODE, ATC_SRC_ADDR_MODE_INCR) |
1280			 FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) |
1281			 FIELD_PREP(ATC_SIF, atchan->mem_if) |
1282			 FIELD_PREP(ATC_DIF, atchan->per_if);
1283		reg = sconfig->dst_addr;
1284		for_each_sg(sgl, sg, sg_len, i) {
1285			struct atdma_sg *atdma_sg = &desc->sg[i];
1286			struct at_lli *lli;
1287			u32		len;
1288			u32		mem;
1289
1290			atdma_sg->lli = dma_pool_alloc(atdma->lli_pool,
1291						       GFP_NOWAIT,
1292						       &atdma_sg->lli_phys);
1293			if (!atdma_sg->lli)
1294				goto err_desc_get;
1295			lli = atdma_sg->lli;
1296
1297			mem = sg_dma_address(sg);
1298			len = sg_dma_len(sg);
1299			if (unlikely(!len)) {
1300				dev_dbg(chan2dev(chan),
1301					"prep_slave_sg: sg(%d) data length is zero\n", i);
1302				goto err;
1303			}
1304			mem_width = 2;
1305			if (unlikely(mem & 3 || len & 3))
1306				mem_width = 0;
1307
1308			lli->saddr = mem;
1309			lli->daddr = reg;
1310			lli->ctrla = ctrla |
1311				     FIELD_PREP(ATC_SRC_WIDTH, mem_width) |
1312				     len >> mem_width;
1313			lli->ctrlb = ctrlb;
1314
1315			atdma_sg->len = len;
1316			total_len += len;
1317
1318			desc->sg[i].len = len;
1319			atdma_lli_chain(desc, i);
1320		}
1321		break;
1322	case DMA_DEV_TO_MEM:
1323		reg_width = convert_buswidth(sconfig->src_addr_width);
1324		ctrla |= FIELD_PREP(ATC_SRC_WIDTH, reg_width);
1325		ctrlb |= FIELD_PREP(ATC_DST_ADDR_MODE, ATC_DST_ADDR_MODE_INCR) |
1326			 FIELD_PREP(ATC_SRC_ADDR_MODE,
1327				    ATC_SRC_ADDR_MODE_FIXED) |
1328			 FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) |
1329			 FIELD_PREP(ATC_SIF, atchan->per_if) |
1330			 FIELD_PREP(ATC_DIF, atchan->mem_if);
1331
1332		reg = sconfig->src_addr;
1333		for_each_sg(sgl, sg, sg_len, i) {
1334			struct atdma_sg *atdma_sg = &desc->sg[i];
1335			struct at_lli *lli;
1336			u32		len;
1337			u32		mem;
1338
1339			atdma_sg->lli = dma_pool_alloc(atdma->lli_pool,
1340						       GFP_NOWAIT,
1341						       &atdma_sg->lli_phys);
1342			if (!atdma_sg->lli)
1343				goto err_desc_get;
1344			lli = atdma_sg->lli;
1345
1346			mem = sg_dma_address(sg);
1347			len = sg_dma_len(sg);
1348			if (unlikely(!len)) {
1349				dev_dbg(chan2dev(chan),
1350					"prep_slave_sg: sg(%d) data length is zero\n", i);
1351				goto err;
1352			}
1353			mem_width = 2;
1354			if (unlikely(mem & 3 || len & 3))
1355				mem_width = 0;
1356
1357			lli->saddr = reg;
1358			lli->daddr = mem;
1359			lli->ctrla = ctrla |
1360				     FIELD_PREP(ATC_DST_WIDTH, mem_width) |
1361				     len >> reg_width;
1362			lli->ctrlb = ctrlb;
1363
1364			desc->sg[i].len = len;
1365			total_len += len;
1366
1367			atdma_lli_chain(desc, i);
1368		}
1369		break;
1370	default:
1371		return NULL;
1372	}
1373
1374	/* set end-of-link on the last link descriptor of the list */
1375	set_lli_eol(desc, i - 1);
1376
1377	desc->total_len = total_len;
1378
1379	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);
1380
1381err_desc_get:
1382	dev_err(chan2dev(chan), "not enough descriptors available\n");
1383err:
1384	atdma_desc_free(&desc->vd);
1385	return NULL;
1386}
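
/*
 * A minimal client-side sketch for the slave path (illustration only; the
 * FIFO address and widths are hypothetical). The peripheral is described
 * first via dma_slave_config, then a DMA-mapped scatterlist is handed over:
 *
 *	static int example_tx_to_dev(struct dma_chan *chan,
 *				     struct scatterlist *sgl,
 *				     unsigned int nents, dma_addr_t fifo)
 *	{
 *		struct dma_slave_config cfg = {
 *			.direction = DMA_MEM_TO_DEV,
 *			.dst_addr = fifo,
 *			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *			.dst_maxburst = 4,
 *		};
 *		struct dma_async_tx_descriptor *tx;
 *
 *		if (dmaengine_slave_config(chan, &cfg))
 *			return -EINVAL;
 *		tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *					     DMA_PREP_INTERRUPT);
 *		if (!tx)
 *			return -ENOMEM;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		return 0;
 *	}
 */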
1387
1388/*
1389 * atc_dma_cyclic_check_values
1390 * Check for too big/unaligned periods and unaligned DMA buffer
1391 */
1392static int
1393atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1394		size_t period_len)
1395{
1396	if (period_len > (ATC_BTSIZE_MAX << reg_width))
1397		goto err_out;
1398	if (unlikely(period_len & ((1 << reg_width) - 1)))
1399		goto err_out;
1400	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1401		goto err_out;
1402
1403	return 0;
1404
1405err_out:
1406	return -EINVAL;
1407}
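
/*
 * Worked example of the checks above (illustration): with reg_width = 2
 * (32-bit accesses) a period may span at most ATC_BTSIZE_MAX << 2 bytes,
 * and both period_len and buf_addr must have their low two bits clear,
 * i.e. (x & 3) == 0. A 1025-byte period with 16-bit accesses
 * (reg_width = 1) would fail the (period_len & 1) test and be rejected.
 */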
1408
1409/*
1410 * atc_dma_cyclic_fill_desc - Fill one period descriptor
1411 */
1412static int
1413atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
1414		unsigned int i, dma_addr_t buf_addr,
1415		unsigned int reg_width, size_t period_len,
1416		enum dma_transfer_direction direction)
1417{
1418	struct at_dma		*atdma = to_at_dma(chan->device);
1419	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1420	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
1421	struct atdma_sg		*atdma_sg = &desc->sg[i];
1422	struct at_lli		*lli;
1423
1424	atdma_sg->lli = dma_pool_alloc(atdma->lli_pool, GFP_ATOMIC,
1425				       &atdma_sg->lli_phys);
1426	if (!atdma_sg->lli)
1427		return -ENOMEM;
1428	lli = atdma_sg->lli;
1429
1430	switch (direction) {
1431	case DMA_MEM_TO_DEV:
1432		lli->saddr = buf_addr + (period_len * i);
1433		lli->daddr = sconfig->dst_addr;
1434		lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE,
1435					ATC_DST_ADDR_MODE_FIXED) |
1436			     FIELD_PREP(ATC_SRC_ADDR_MODE,
1437					ATC_SRC_ADDR_MODE_INCR) |
1438			     FIELD_PREP(ATC_FC, ATC_FC_MEM2PER) |
1439			     FIELD_PREP(ATC_SIF, atchan->mem_if) |
1440			     FIELD_PREP(ATC_DIF, atchan->per_if);
1441
1442		break;
1443
1444	case DMA_DEV_TO_MEM:
1445		lli->saddr = sconfig->src_addr;
1446		lli->daddr = buf_addr + (period_len * i);
1447		lli->ctrlb = FIELD_PREP(ATC_DST_ADDR_MODE,
1448					ATC_DST_ADDR_MODE_INCR) |
1449			     FIELD_PREP(ATC_SRC_ADDR_MODE,
1450					ATC_SRC_ADDR_MODE_FIXED) |
1451			     FIELD_PREP(ATC_FC, ATC_FC_PER2MEM) |
1452			     FIELD_PREP(ATC_SIF, atchan->per_if) |
1453			     FIELD_PREP(ATC_DIF, atchan->mem_if);
1454		break;
1455
1456	default:
1457		return -EINVAL;
1458	}
1459
1460	lli->ctrla = FIELD_PREP(ATC_SCSIZE, sconfig->src_maxburst) |
1461		     FIELD_PREP(ATC_DCSIZE, sconfig->dst_maxburst) |
1462		     FIELD_PREP(ATC_DST_WIDTH, reg_width) |
1463		     FIELD_PREP(ATC_SRC_WIDTH, reg_width) |
1464		     period_len >> reg_width;
1465	desc->sg[i].len = period_len;
1466
1467	return 0;
1468}
1469
1470/**
1471 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
1472 * @chan: the DMA channel to prepare
1473 * @buf_addr: physical DMA address where the buffer starts
1474 * @buf_len: total number of bytes for the entire buffer
1475 * @period_len: number of bytes for each period
1476 * @direction: transfer direction, to or from device
1477 * @flags: tx descriptor status flags
1478 */
1479static struct dma_async_tx_descriptor *
1480atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1481		size_t period_len, enum dma_transfer_direction direction,
1482		unsigned long flags)
1483{
1484	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1485	struct at_dma_slave	*atslave = chan->private;
1486	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
1487	struct at_desc		*desc;
1488	unsigned long		was_cyclic;
1489	unsigned int		reg_width;
1490	unsigned int		periods = buf_len / period_len;
1491	unsigned int		i;
1492
1493	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %u (%zu/%zu)\n",
1494			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1495			&buf_addr,
1496			periods, buf_len, period_len);
1497
1498	if (unlikely(!atslave || !buf_len || !period_len)) {
1499		dev_dbg(chan2dev(chan), "prep_dma_cyclic: no slave config or zero length!\n");
1500		return NULL;
1501	}
1502
1503	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
1504	if (was_cyclic) {
1505		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
1506		return NULL;
1507	}
1508
1509	if (unlikely(!is_slave_direction(direction)))
1510		goto err_out;
1511
1512	if (direction == DMA_MEM_TO_DEV)
1513		reg_width = convert_buswidth(sconfig->dst_addr_width);
1514	else
1515		reg_width = convert_buswidth(sconfig->src_addr_width);
1516
1517	/* Check for too big/unaligned periods and unaligned DMA buffer */
1518	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
1519		goto err_out;
1520
1521	desc = kzalloc(struct_size(desc, sg, periods), GFP_ATOMIC);
1522	if (!desc)
1523		goto err_out;
1524	desc->sglen = periods;
1525
1526	/* build cyclic linked list */
1527	for (i = 0; i < periods; i++) {
1528		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1529					     reg_width, period_len, direction))
1530			goto err_fill_desc;
1531		atdma_lli_chain(desc, i);
1532	}
1533	desc->total_len = buf_len;
1534	/* let's make it a cyclic list */
1535	desc->sg[i - 1].lli->dscr = desc->sg[0].lli_phys;
1536
1537	return vchan_tx_prep(&atchan->vc, &desc->vd, flags);
1538
1539err_fill_desc:
1540	atdma_desc_free(&desc->vd);
1541err_out:
1542	clear_bit(ATC_IS_CYCLIC, &atchan->status);
1543	return NULL;
1544}
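
/*
 * A minimal client-side sketch of a cyclic (ring buffer) transfer, the
 * pattern used by audio (illustration only; slave configuration as in the
 * slave_sg example, handler name hypothetical). The transfer restarts by
 * itself because the last LLI above points back to the first:
 *
 *	static void example_period_done(void *arg)
 *	{
 *		// one period completed: consume that part of the ring here
 *	}
 *
 *	static int example_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
 *				     size_t buf_len, size_t period_len)
 *	{
 *		struct dma_async_tx_descriptor *tx;
 *
 *		tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					       DMA_DEV_TO_MEM,
 *					       DMA_PREP_INTERRUPT);
 *		if (!tx)
 *			return -ENOMEM;
 *		tx->callback = example_period_done;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		return 0;
 *	}
 */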
1545
1546static int atc_config(struct dma_chan *chan,
1547		      struct dma_slave_config *sconfig)
1548{
1549	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1550
1551	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1552
1553	/* Check if chan is configured for slave transfers */
1554	if (!chan->private)
1555		return -EINVAL;
1556
1557	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1558
1559	convert_burst(&atchan->dma_sconfig.src_maxburst);
1560	convert_burst(&atchan->dma_sconfig.dst_maxburst);
1561
1562	return 0;
1563}
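
/*
 * Worked example (illustration): dma_slave_config burst values arrive in
 * units of transfers, and convert_burst() translates them into the
 * controller's chunk-size encoding. Assuming the usual fls()-based
 * conversion, a maxburst of 1 maps to 0, 4 to 1, 8 to 2 and 16 to 3.
 */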
1564
1565static int atc_pause(struct dma_chan *chan)
1566{
1567	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1568	struct at_dma		*atdma = to_at_dma(chan->device);
1569	int			chan_id = atchan->vc.chan.chan_id;
1570	unsigned long		flags;
1571
1572	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1573
1574	spin_lock_irqsave(&atchan->vc.lock, flags);
1575
1576	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1577	set_bit(ATC_IS_PAUSED, &atchan->status);
1578
1579	spin_unlock_irqrestore(&atchan->vc.lock, flags);
1580
1581	return 0;
1582}
1583
1584static int atc_resume(struct dma_chan *chan)
1585{
1586	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1587	struct at_dma		*atdma = to_at_dma(chan->device);
1588	int			chan_id = atchan->vc.chan.chan_id;
1589	unsigned long		flags;
1590
1591	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1592
1593	if (!atc_chan_is_paused(atchan))
1594		return 0;
1595
1596	spin_lock_irqsave(&atchan->vc.lock, flags);
1597
1598	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1599	clear_bit(ATC_IS_PAUSED, &atchan->status);
1600
1601	spin_unlock_irqrestore(&atchan->vc.lock, flags);
1602
1603	return 0;
1604}
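
/*
 * Client-side sketch (illustration): pause and resume go through the
 * generic wrappers, which land in atc_pause()/atc_resume() above. The
 * peripheral feeding the channel should be quiesced by the caller:
 *
 *	if (dmaengine_pause(chan))
 *		pr_warn("channel could not be paused\n");
 *	// ... peripheral stopped ...
 *	dmaengine_resume(chan);
 */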
1605
1606static int atc_terminate_all(struct dma_chan *chan)
1607{
1608	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1609	struct at_dma		*atdma = to_at_dma(chan->device);
1610	int			chan_id = atchan->vc.chan.chan_id;
1611	unsigned long		flags;
1612
1613	LIST_HEAD(list);
1614
1615	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1616
1617	/*
1618	 * This is only called when something went wrong elsewhere, so
1619	 * we don't really care about the data. Just disable the
1620	 * channel. We still have to poll the channel enable bit due
1621	 * to AHB/HSB limitations.
1622	 */
1623	spin_lock_irqsave(&atchan->vc.lock, flags);
1624
1625	/* disabling channel: must also remove suspend state */
1626	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1627
1628	/* confirm that this channel is disabled */
1629	while (dma_readl(atdma, CHSR) & atchan->mask)
1630		cpu_relax();
1631
1632	if (atchan->desc) {
1633		vchan_terminate_vdesc(&atchan->desc->vd);
1634		atchan->desc = NULL;
1635	}
1636
1637	vchan_get_all_descriptors(&atchan->vc, &list);
1638
1639	clear_bit(ATC_IS_PAUSED, &atchan->status);
1640	/* if channel dedicated to cyclic operations, free it */
1641	clear_bit(ATC_IS_CYCLIC, &atchan->status);
1642
1643	spin_unlock_irqrestore(&atchan->vc.lock, flags);
1644
1645	vchan_dma_desc_free_list(&atchan->vc, &list);
1646
1647	return 0;
1648}
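
/*
 * Client-side sketch (illustration): teardown normally uses the _sync
 * variant so that any in-flight completion callback has finished before
 * the caller frees its buffers:
 *
 *	dmaengine_terminate_sync(chan);
 *	dma_release_channel(chan);
 */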
1649
1650/**
1651 * atc_tx_status - poll for transaction completion
1652 * @chan: DMA channel
1653 * @cookie: transaction identifier to check status of
1654 * @txstate: if not %NULL updated with transaction state
1655 *
1656 * If @txstate is passed in, upon return it reflects the driver's
1657 * internal state and can be used with dma_async_is_complete() to check
1658 * the status of multiple cookies without re-checking hardware state.
1659 */
1660static enum dma_status
1661atc_tx_status(struct dma_chan *chan,
1662		dma_cookie_t cookie,
1663		struct dma_tx_state *txstate)
1664{
1665	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1666	unsigned long		flags;
1667	enum dma_status		dma_status;
1668	u32 residue;
1669	int ret;
1670
1671	dma_status = dma_cookie_status(chan, cookie, txstate);
1672	if (dma_status == DMA_COMPLETE || !txstate)
1673		return dma_status;
1674
1675	spin_lock_irqsave(&atchan->vc.lock, flags);
1676	/* Get number of bytes left in the active transactions */
1677	ret = atc_get_residue(chan, cookie, &residue);
1678	spin_unlock_irqrestore(&atchan->vc.lock, flags);
1679
1680	if (unlikely(ret < 0)) {
1681		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1682		return DMA_ERROR;
1683	} else {
1684		dma_set_residue(txstate, residue);
1685	}
1686
1687	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %u\n",
1688		 dma_status, cookie, residue);
1689
1690	return dma_status;
1691}
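
/*
 * Client-side polling sketch (illustration): residue granularity is per
 * burst on this controller, so the reported value is only exact at burst
 * boundaries:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_info("%u bytes left\n", state.residue);
 */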
1692
1693static void atc_issue_pending(struct dma_chan *chan)
1694{
1695	struct at_dma_chan *atchan = to_at_dma_chan(chan);
1696	unsigned long flags;
1697
1698	spin_lock_irqsave(&atchan->vc.lock, flags);
1699	if (vchan_issue_pending(&atchan->vc) && !atchan->desc) {
1700		if (!(atc_chan_is_enabled(atchan)))
1701			atc_dostart(atchan);
1702	}
1703	spin_unlock_irqrestore(&atchan->vc.lock, flags);
1704}
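
/*
 * The three-step dmaengine submission contract, as a sketch (illustration):
 * a prep routine returns a descriptor, dmaengine_submit() assigns a cookie
 * and queues it, and nothing touches the hardware until
 * dma_async_issue_pending(), which is where atc_dostart() kicks an idle
 * channel:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */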
1705
1706/**
1707 * atc_alloc_chan_resources - allocate resources for DMA channel
1708 * @chan: allocate descriptor resources for this channel
1709 *
1710 * Return: 0 on success, or -EIO if the channel is not idle
1711 */
1712static int atc_alloc_chan_resources(struct dma_chan *chan)
1713{
1714	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1715	struct at_dma		*atdma = to_at_dma(chan->device);
1716	struct at_dma_slave	*atslave;
1717	u32			cfg;
1718
1719	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1720
1721	/* ASSERT:  channel is idle */
1722	if (atc_chan_is_enabled(atchan)) {
1723		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1724		return -EIO;
1725	}
1726
1727	cfg = ATC_DEFAULT_CFG;
1728
1729	atslave = chan->private;
1730	if (atslave) {
1731		/*
1732		 * We need controller-specific data to set up slave
1733		 * transfers.
1734		 */
1735		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_device.dev);
1736
1737		/* if cfg configuration specified take it instead of default */
1738		if (atslave->cfg)
1739			cfg = atslave->cfg;
1740	}
1741
1742	/* channel parameters */
1743	channel_writel(atchan, CFG, cfg);
1744
1745	return 0;
1746}
1747
1748/**
1749 * atc_free_chan_resources - free all channel resources
1750 * @chan: DMA channel
1751 */
1752static void atc_free_chan_resources(struct dma_chan *chan)
1753{
1754	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1755
1756	BUG_ON(atc_chan_is_enabled(atchan));
1757
1758	vchan_free_chan_resources(to_virt_chan(chan));
1759	atchan->status = 0;
1760
1761	/*
1762	 * Free atslave allocated in at_dma_xlate()
1763	 */
1764	kfree(chan->private);
1765	chan->private = NULL;
1766
1767	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1768}
1769
1770#ifdef CONFIG_OF
1771static bool at_dma_filter(struct dma_chan *chan, void *slave)
1772{
1773	struct at_dma_slave *atslave = slave;
1774
1775	if (atslave->dma_dev == chan->device->dev) {
1776		chan->private = atslave;
1777		return true;
1778	} else {
1779		return false;
1780	}
1781}
1782
1783static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1784				     struct of_dma *of_dma)
1785{
1786	struct dma_chan *chan;
1787	struct at_dma_chan *atchan;
1788	struct at_dma_slave *atslave;
1789	dma_cap_mask_t mask;
1790	unsigned int per_id;
1791	struct platform_device *dmac_pdev;
1792
1793	if (dma_spec->args_count != 2)
1794		return NULL;
1795
1796	dmac_pdev = of_find_device_by_node(dma_spec->np);
1797	if (!dmac_pdev)
1798		return NULL;
1799
1800	dma_cap_zero(mask);
1801	dma_cap_set(DMA_SLAVE, mask);
1802
1803	atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
1804	if (!atslave) {
1805		put_device(&dmac_pdev->dev);
1806		return NULL;
1807	}
1808
1809	atslave->cfg = ATC_DST_H2SEL | ATC_SRC_H2SEL;
1810	/*
1811	 * We can fill both SRC_PER and DST_PER, one of these fields will be
1812	 * ignored depending on DMA transfer direction.
1813	 */
1814	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1815	atslave->cfg |= ATC_DST_PER_ID(per_id) | ATC_SRC_PER_ID(per_id);
1816	/*
1817	 * We have to translate the value we get from the device tree since
1818	 * the half FIFO configuration value had to be 0 to keep backward
1819	 * compatibility.
1820	 */
1821	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1822	case AT91_DMA_CFG_FIFOCFG_ALAP:
1823		atslave->cfg |= FIELD_PREP(ATC_FIFOCFG,
1824					   ATC_FIFOCFG_LARGESTBURST);
1825		break;
1826	case AT91_DMA_CFG_FIFOCFG_ASAP:
1827		atslave->cfg |= FIELD_PREP(ATC_FIFOCFG,
1828					   ATC_FIFOCFG_ENOUGHSPACE);
1829		break;
1830	case AT91_DMA_CFG_FIFOCFG_HALF:
1831	default:
1832		atslave->cfg |= FIELD_PREP(ATC_FIFOCFG, ATC_FIFOCFG_HALFFIFO);
1833	}
1834	atslave->dma_dev = &dmac_pdev->dev;
1835
1836	chan = dma_request_channel(mask, at_dma_filter, atslave);
1837	if (!chan) {
1838		put_device(&dmac_pdev->dev);
1839		kfree(atslave);
1840		return NULL;
1841	}
1842
1843	atchan = to_at_dma_chan(chan);
1844	atchan->per_if = dma_spec->args[0] & 0xff;
1845	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1846
1847	return chan;
1848}
1849#else
1850static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1851				     struct of_dma *of_dma)
1852{
1853	return NULL;
1854}
1855#endif
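
/*
 * Device tree client sketch (illustration, matching the two-cell specifier
 * decoded in at_dma_xlate() above): cell 0 packs the peripheral and memory
 * AHB interfaces, cell 1 the peripheral ID plus FIFO configuration flags
 * from dt-bindings/dma/at91.h. A hypothetical client node:
 *
 *	uart1: serial@f8020000 {
 *		...
 *		dmas = <&dma0 2 AT91_DMA_CFG_PER_ID(10)>,
 *		       <&dma0 2 (AT91_DMA_CFG_PER_ID(11)
 *				 | AT91_DMA_CFG_FIFOCFG_ASAP)>;
 *		dma-names = "tx", "rx";
 *	};
 */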
1856
1857/*--  Module Management  -----------------------------------------------*/
1858
1859/* cap_mask is a multi-u32 bitfield; it cannot be initialized statically, so it is filled at probe time with dma_cap_set(). */
1860static struct at_dma_platform_data at91sam9rl_config = {
1861	.nr_channels = 2,
1862};
1863static struct at_dma_platform_data at91sam9g45_config = {
1864	.nr_channels = 8,
1865};
1866
1867#if defined(CONFIG_OF)
1868static const struct of_device_id atmel_dma_dt_ids[] = {
1869	{
1870		.compatible = "atmel,at91sam9rl-dma",
1871		.data = &at91sam9rl_config,
1872	}, {
1873		.compatible = "atmel,at91sam9g45-dma",
1874		.data = &at91sam9g45_config,
1875	}, {
1876		/* sentinel */
1877	}
1878};
1879
1880MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1881#endif
1882
1883static const struct platform_device_id atdma_devtypes[] = {
1884	{
1885		.name = "at91sam9rl_dma",
1886		.driver_data = (unsigned long) &at91sam9rl_config,
1887	}, {
1888		.name = "at91sam9g45_dma",
1889		.driver_data = (unsigned long) &at91sam9g45_config,
1890	}, {
1891		/* sentinel */
1892	}
1893};
1894
1895static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1896						struct platform_device *pdev)
1897{
1898	if (pdev->dev.of_node) {
1899		const struct of_device_id *match;
1900		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1901		if (match == NULL)
1902			return NULL;
1903		return match->data;
1904	}
1905	return (struct at_dma_platform_data *)
1906			platform_get_device_id(pdev)->driver_data;
1907}
1908
1909/**
1910 * at_dma_off - disable DMA controller
1911 * @atdma: the Atmel HDMAC device
1912 */
1913static void at_dma_off(struct at_dma *atdma)
1914{
1915	dma_writel(atdma, EN, 0);
1916
1917	/* disable all interrupts */
1918	dma_writel(atdma, EBCIDR, -1L);
1919
1920	/* confirm that all channels are disabled */
1921	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1922		cpu_relax();
1923}
1924
1925static int __init at_dma_probe(struct platform_device *pdev)
1926{
1927	struct at_dma		*atdma;
1928	int			irq;
1929	int			err;
1930	int			i;
1931	const struct at_dma_platform_data *plat_dat;
1932
1933	/* setup platform data for each SoC */
1934	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1935	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
1936	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1937	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
1938	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
1939	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
1940	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1941
1942	/* get DMA parameters from controller type */
1943	plat_dat = at_dma_get_driver_data(pdev);
1944	if (!plat_dat)
1945		return -ENODEV;
1946
1947	atdma = devm_kzalloc(&pdev->dev,
1948			     struct_size(atdma, chan, plat_dat->nr_channels),
1949			     GFP_KERNEL);
1950	if (!atdma)
1951		return -ENOMEM;
1952
1953	atdma->regs = devm_platform_ioremap_resource(pdev, 0);
1954	if (IS_ERR(atdma->regs))
1955		return PTR_ERR(atdma->regs);
1956
1957	irq = platform_get_irq(pdev, 0);
1958	if (irq < 0)
1959		return irq;
1960
1961	/* discover transaction capabilities */
1962	atdma->dma_device.cap_mask = plat_dat->cap_mask;
1963	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1964
1965	atdma->clk = devm_clk_get(&pdev->dev, "dma_clk");
1966	if (IS_ERR(atdma->clk))
1967		return PTR_ERR(atdma->clk);
1968
1969	err = clk_prepare_enable(atdma->clk);
1970	if (err)
1971		return err;
1972
1973	/* force dma off, just in case */
1974	at_dma_off(atdma);
1975
1976	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1977	if (err)
1978		goto err_irq;
1979
1980	platform_set_drvdata(pdev, atdma);
1981
1982	/* create a pool of consistent memory blocks for hardware descriptors */
1983	atdma->lli_pool = dma_pool_create("at_hdmac_lli_pool",
1984					  &pdev->dev, sizeof(struct at_lli),
1985					  4 /* word alignment */, 0);
1986	if (!atdma->lli_pool) {
1987		dev_err(&pdev->dev, "Unable to allocate DMA LLI descriptor pool\n");
1988		err = -ENOMEM;
1989		goto err_desc_pool_create;
1990	}
1991
1992	/* create a pool of consistent memory blocks for memset blocks */
1993	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
1994					     &pdev->dev, sizeof(int), 4, 0);
1995	if (!atdma->memset_pool) {
1996		dev_err(&pdev->dev, "No memory for memset dma pool\n");
1997		err = -ENOMEM;
1998		goto err_memset_pool_create;
1999	}
2000
2001	/* clear any pending interrupt */
2002	while (dma_readl(atdma, EBCISR))
2003		cpu_relax();
2004
2005	/* initialize channels related values */
2006	INIT_LIST_HEAD(&atdma->dma_device.channels);
2007	for (i = 0; i < plat_dat->nr_channels; i++) {
2008		struct at_dma_chan	*atchan = &atdma->chan[i];
2009
2010		atchan->mem_if = AT_DMA_MEM_IF;
2011		atchan->per_if = AT_DMA_PER_IF;
2012
2013		atchan->ch_regs = atdma->regs + ch_regs(i);
2014		atchan->mask = 1 << i;
2015
2016		atchan->atdma = atdma;
2017		atchan->vc.desc_free = atdma_desc_free;
2018		vchan_init(&atchan->vc, &atdma->dma_device);
2019		atc_enable_chan_irq(atdma, i);
2020	}
2021
2022	/* set base routines */
2023	atdma->dma_device.device_alloc_chan_resources = atc_alloc_chan_resources;
2024	atdma->dma_device.device_free_chan_resources = atc_free_chan_resources;
2025	atdma->dma_device.device_tx_status = atc_tx_status;
2026	atdma->dma_device.device_issue_pending = atc_issue_pending;
2027	atdma->dma_device.dev = &pdev->dev;
2028
2029	/* set prep routines based on capability */
2030	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_device.cap_mask))
2031		atdma->dma_device.device_prep_interleaved_dma = atc_prep_dma_interleaved;
2032
2033	if (dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask))
2034		atdma->dma_device.device_prep_dma_memcpy = atc_prep_dma_memcpy;
2035
2036	if (dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask)) {
2037		atdma->dma_device.device_prep_dma_memset = atc_prep_dma_memset;
2038		atdma->dma_device.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
2039		atdma->dma_device.fill_align = DMAENGINE_ALIGN_4_BYTES;
2040	}
2041
2042	if (dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask)) {
2043		atdma->dma_device.device_prep_slave_sg = atc_prep_slave_sg;
2044		/* controller can do slave DMA: can trigger cyclic transfers */
2045		dma_cap_set(DMA_CYCLIC, atdma->dma_device.cap_mask);
2046		atdma->dma_device.device_prep_dma_cyclic = atc_prep_dma_cyclic;
2047		atdma->dma_device.device_config = atc_config;
2048		atdma->dma_device.device_pause = atc_pause;
2049		atdma->dma_device.device_resume = atc_resume;
2050		atdma->dma_device.device_terminate_all = atc_terminate_all;
2051		atdma->dma_device.src_addr_widths = ATC_DMA_BUSWIDTHS;
2052		atdma->dma_device.dst_addr_widths = ATC_DMA_BUSWIDTHS;
2053		atdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2054		atdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2055	}
2056
2057	dma_writel(atdma, EN, AT_DMA_ENABLE);
2058
2059	dev_info(&pdev->dev, "Atmel AHB DMA Controller (%s%s%s), %d channels\n",
2060	  dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask) ? "cpy " : "",
2061	  dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask) ? "set " : "",
2062	  dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask)  ? "slave " : "",
2063	  plat_dat->nr_channels);
2064
2065	err = dma_async_device_register(&atdma->dma_device);
2066	if (err) {
2067		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
2068		goto err_dma_async_device_register;
2069	}
2070
2071	/*
2072	 * Do not return an error if the dmac node is not present in order to
2073	 * not break the existing way of requesting channel with
2074	 * dma_request_channel().
2075	 */
2076	if (pdev->dev.of_node) {
2077		err = of_dma_controller_register(pdev->dev.of_node,
2078						 at_dma_xlate, atdma);
2079		if (err) {
2080			dev_err(&pdev->dev, "could not register of_dma_controller\n");
2081			goto err_of_dma_controller_register;
2082		}
2083	}
2084
2085	return 0;
2086
2087err_of_dma_controller_register:
2088	dma_async_device_unregister(&atdma->dma_device);
2089err_dma_async_device_register:
2090	dma_pool_destroy(atdma->memset_pool);
2091err_memset_pool_create:
2092	dma_pool_destroy(atdma->lli_pool);
2093err_desc_pool_create:
2094	free_irq(platform_get_irq(pdev, 0), atdma);
2095err_irq:
2096	clk_disable_unprepare(atdma->clk);
2097	return err;
2098}
2099
2100static int at_dma_remove(struct platform_device *pdev)
2101{
2102	struct at_dma		*atdma = platform_get_drvdata(pdev);
2103	struct dma_chan		*chan, *_chan;
2104
2105	at_dma_off(atdma);
2106	if (pdev->dev.of_node)
2107		of_dma_controller_free(pdev->dev.of_node);
2108	dma_async_device_unregister(&atdma->dma_device);
2109
2110	dma_pool_destroy(atdma->memset_pool);
2111	dma_pool_destroy(atdma->lli_pool);
2112	free_irq(platform_get_irq(pdev, 0), atdma);
2113
2114	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
2115			device_node) {
2116		/* Disable interrupts */
2117		atc_disable_chan_irq(atdma, chan->chan_id);
2118		list_del(&chan->device_node);
2119	}
2120
2121	clk_disable_unprepare(atdma->clk);
2122
2123	return 0;
2124}
2125
2126static void at_dma_shutdown(struct platform_device *pdev)
2127{
2128	struct at_dma	*atdma = platform_get_drvdata(pdev);
2129
2130	at_dma_off(platform_get_drvdata(pdev));
2131	clk_disable_unprepare(atdma->clk);
2132}
2133
2134static int at_dma_prepare(struct device *dev)
2135{
2136	struct at_dma *atdma = dev_get_drvdata(dev);
2137	struct dma_chan *chan, *_chan;
2138
2139	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
2140			device_node) {
2141		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2142		/* wait for transaction completion (except in cyclic case) */
2143		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
2144			return -EAGAIN;
2145	}
2146	return 0;
2147}
2148
2149static void atc_suspend_cyclic(struct at_dma_chan *atchan)
2150{
2151	struct dma_chan	*chan = &atchan->vc.chan;
2152
2153	/* The channel should be paused by the user; do it anyway
2154	 * even if it has not been done already */
2155	if (!atc_chan_is_paused(atchan)) {
2156		dev_warn(chan2dev(chan),
2157		"cyclic channel not paused, should be done by channel user\n");
2158		atc_pause(chan);
2159	}
2160
2161	/* now preserve additional data for cyclic operations */
2162	/* next descriptor address in the cyclic list */
2163	atchan->save_dscr = channel_readl(atchan, DSCR);
2164
2165	vdbg_dump_regs(atchan);
2166}
2167
2168static int at_dma_suspend_noirq(struct device *dev)
2169{
2170	struct at_dma *atdma = dev_get_drvdata(dev);
2171	struct dma_chan *chan, *_chan;
2172
2173	/* preserve data */
2174	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
2175			device_node) {
2176		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2177
2178		if (atc_chan_is_cyclic(atchan))
2179			atc_suspend_cyclic(atchan);
2180		atchan->save_cfg = channel_readl(atchan, CFG);
2181	}
2182	atdma->save_imr = dma_readl(atdma, EBCIMR);
2183
2184	/* disable DMA controller */
2185	at_dma_off(atdma);
2186	clk_disable_unprepare(atdma->clk);
2187	return 0;
2188}
2189
2190static void atc_resume_cyclic(struct at_dma_chan *atchan)
2191{
2192	struct at_dma	*atdma = to_at_dma(atchan->vc.chan.device);
2193
2194	/* restore channel status for the cyclic descriptor list:
2195	 * next descriptor in the cyclic list at the time of suspend */
2196	channel_writel(atchan, SADDR, 0);
2197	channel_writel(atchan, DADDR, 0);
2198	channel_writel(atchan, CTRLA, 0);
2199	channel_writel(atchan, CTRLB, 0);
2200	channel_writel(atchan, DSCR, atchan->save_dscr);
2201	dma_writel(atdma, CHER, atchan->mask);
2202
2203	/* The channel pause status should be cleared by the channel user;
2204	 * we cannot take the initiative to do it here */
2205
2206	vdbg_dump_regs(atchan);
2207}
2208
2209static int at_dma_resume_noirq(struct device *dev)
2210{
2211	struct at_dma *atdma = dev_get_drvdata(dev);
2212	struct dma_chan *chan, *_chan;
2213
2214	/* bring back DMA controller */
2215	clk_prepare_enable(atdma->clk);
2216	dma_writel(atdma, EN, AT_DMA_ENABLE);
2217
2218	/* clear any pending interrupt */
2219	while (dma_readl(atdma, EBCISR))
2220		cpu_relax();
2221
2222	/* restore saved data */
2223	dma_writel(atdma, EBCIER, atdma->save_imr);
2224	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
2225			device_node) {
2226		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2227
2228		channel_writel(atchan, CFG, atchan->save_cfg);
2229		if (atc_chan_is_cyclic(atchan))
2230			atc_resume_cyclic(atchan);
2231	}
2232	return 0;
2233}
2234
2235static const struct dev_pm_ops __maybe_unused at_dma_dev_pm_ops = {
2236	.prepare = at_dma_prepare,
2237	.suspend_noirq = at_dma_suspend_noirq,
2238	.resume_noirq = at_dma_resume_noirq,
2239};
2240
2241static struct platform_driver at_dma_driver = {
2242	.remove		= at_dma_remove,
2243	.shutdown	= at_dma_shutdown,
2244	.id_table	= atdma_devtypes,
2245	.driver = {
2246		.name	= "at_hdmac",
2247		.pm	= pm_ptr(&at_dma_dev_pm_ops),
2248		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
2249	},
2250};
2251
2252static int __init at_dma_init(void)
2253{
2254	return platform_driver_probe(&at_dma_driver, at_dma_probe);
2255}
2256subsys_initcall(at_dma_init);
2257
2258static void __exit at_dma_exit(void)
2259{
2260	platform_driver_unregister(&at_dma_driver);
2261}
2262module_exit(at_dma_exit);
2263
2264MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2265MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2266MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
2267MODULE_LICENSE("GPL");
2268MODULE_ALIAS("platform:at_hdmac");