   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Driver for the Cirrus Logic EP93xx DMA Controller
   4 *
   5 * Copyright (C) 2011 Mika Westerberg
   6 *
   7 * DMA M2P implementation is based on the original
   8 * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
   9 *
  10 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
  11 *   Copyright (C) 2006 Applied Data Systems
  12 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
  13 *
  14 * This driver is based on dw_dmac and amba-pl08x drivers.
  15 */
  16
  17#include <linux/clk.h>
  18#include <linux/init.h>
  19#include <linux/interrupt.h>
  20#include <linux/dmaengine.h>
  21#include <linux/module.h>
  22#include <linux/mod_devicetable.h>
  23#include <linux/platform_device.h>
  24#include <linux/slab.h>
  25
  26#include <linux/platform_data/dma-ep93xx.h>
  27
  28#include "dmaengine.h"
  29
  30/* M2P registers */
  31#define M2P_CONTROL			0x0000
  32#define M2P_CONTROL_STALLINT		BIT(0)
  33#define M2P_CONTROL_NFBINT		BIT(1)
  34#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
  35#define M2P_CONTROL_ENABLE		BIT(4)
  36#define M2P_CONTROL_ICE			BIT(6)
  37
  38#define M2P_INTERRUPT			0x0004
  39#define M2P_INTERRUPT_STALL		BIT(0)
  40#define M2P_INTERRUPT_NFB		BIT(1)
  41#define M2P_INTERRUPT_ERROR		BIT(3)
  42
  43#define M2P_PPALLOC			0x0008
  44#define M2P_STATUS			0x000c
  45
  46#define M2P_MAXCNT0			0x0020
  47#define M2P_BASE0			0x0024
  48#define M2P_MAXCNT1			0x0030
  49#define M2P_BASE1			0x0034
  50
  51#define M2P_STATE_IDLE			0
  52#define M2P_STATE_STALL			1
  53#define M2P_STATE_ON			2
  54#define M2P_STATE_NEXT			3
  55
  56/* M2M registers */
  57#define M2M_CONTROL			0x0000
  58#define M2M_CONTROL_DONEINT		BIT(2)
  59#define M2M_CONTROL_ENABLE		BIT(3)
  60#define M2M_CONTROL_START		BIT(4)
  61#define M2M_CONTROL_DAH			BIT(11)
  62#define M2M_CONTROL_SAH			BIT(12)
  63#define M2M_CONTROL_PW_SHIFT		9
  64#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
  65#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
  66#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
  67#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
  68#define M2M_CONTROL_TM_SHIFT		13
  69#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
  70#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
  71#define M2M_CONTROL_NFBINT		BIT(21)
  72#define M2M_CONTROL_RSS_SHIFT		22
  73#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
  74#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
  75#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
  76#define M2M_CONTROL_NO_HDSK		BIT(24)
  77#define M2M_CONTROL_PWSC_SHIFT		25
  78
  79#define M2M_INTERRUPT			0x0004
  80#define M2M_INTERRUPT_MASK		6
  81
  82#define M2M_STATUS			0x000c
  83#define M2M_STATUS_CTL_SHIFT		1
  84#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
  85#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
  86#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
  87#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
  88#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
  89#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
  90#define M2M_STATUS_BUF_SHIFT		4
  91#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
  92#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
  93#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
  94#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
  95#define M2M_STATUS_DONE			BIT(6)
  96
  97#define M2M_BCR0			0x0010
  98#define M2M_BCR1			0x0014
  99#define M2M_SAR_BASE0			0x0018
 100#define M2M_SAR_BASE1			0x001c
 101#define M2M_DAR_BASE0			0x002c
 102#define M2M_DAR_BASE1			0x0030
 103
 104#define DMA_MAX_CHAN_BYTES		0xffff
 105#define DMA_MAX_CHAN_DESCRIPTORS	32
 106
 107struct ep93xx_dma_engine;
 108static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
 109					 enum dma_transfer_direction dir,
 110					 struct dma_slave_config *config);
 111
 112/**
 113 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 114 * @src_addr: source address of the transaction
 115 * @dst_addr: destination address of the transaction
 116 * @size: size of the transaction (in bytes)
 117 * @complete: this descriptor is completed
 118 * @txd: dmaengine API descriptor
 119 * @tx_list: list of linked descriptors
 120 * @node: link used for putting this into a channel queue
 121 */
 122struct ep93xx_dma_desc {
 123	u32				src_addr;
 124	u32				dst_addr;
 125	size_t				size;
 126	bool				complete;
 127	struct dma_async_tx_descriptor	txd;
 128	struct list_head		tx_list;
 129	struct list_head		node;
 130};
 131
 132/**
 133 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 134 * @chan: dmaengine API channel
 135 * @edma: pointer to the engine device
 136 * @regs: memory mapped registers
 137 * @irq: interrupt number of the channel
 138 * @clk: clock used by this channel
 139 * @tasklet: channel specific tasklet used for callbacks
 140 * @lock: lock protecting the fields following
 141 * @flags: flags for the channel
 142 * @buffer: which buffer to use next (0/1)
 143 * @active: flattened chain of descriptors currently being processed
 144 * @queue: pending descriptors which are handled next
 145 * @free_list: list of free descriptors which can be used
 146 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 147 *                is set via .device_config before slave operation is
 148 *                prepared
 149 * @runtime_ctrl: M2M runtime values for the control register.
 150 * @slave_config: slave configuration
 151 *
  152 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
  153 * we use a slightly different scheme here: @active points to the head of a
  154 * flattened DMA descriptor chain.
 155 *
 156 * @queue holds pending transactions. These are linked through the first
 157 * descriptor in the chain. When a descriptor is moved to the @active queue,
 158 * the first and chained descriptors are flattened into a single list.
 159 *
  160 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
  161 * the necessary channel configuration information. For memcpy channels this
  162 * must be %NULL.
 163 */
 164struct ep93xx_dma_chan {
 165	struct dma_chan			chan;
 166	const struct ep93xx_dma_engine	*edma;
 167	void __iomem			*regs;
 168	int				irq;
 169	struct clk			*clk;
 170	struct tasklet_struct		tasklet;
 171	/* protects the fields following */
 172	spinlock_t			lock;
 173	unsigned long			flags;
 174/* Channel is configured for cyclic transfers */
 175#define EP93XX_DMA_IS_CYCLIC		0
 176
 177	int				buffer;
 178	struct list_head		active;
 179	struct list_head		queue;
 180	struct list_head		free_list;
 181	u32				runtime_addr;
 182	u32				runtime_ctrl;
 183	struct dma_slave_config		slave_config;
 184};
 185
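/*
 * A minimal client-side sketch of how @chan.private gets set (illustrative
 * only; the filter-function name is hypothetical, but the pattern follows
 * how EP93xx client drivers have used dma_request_channel(), shown here
 * for an M2P channel):
 *
 *	static bool my_dma_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		struct ep93xx_dma_data *data = filter_param;
 *
 *		if (data->direction != ep93xx_dma_chan_direction(chan))
 *			return false;
 *
 *		chan->private = data;
 *		return true;
 *	}
 */
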
 186/**
 187 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 188 * @dma_dev: holds the dmaengine device
 189 * @m2m: is this an M2M or M2P device
 190 * @hw_setup: method which sets the channel up for operation
 191 * @hw_synchronize: synchronizes DMA channel termination to current context
 192 * @hw_shutdown: shuts the channel down and flushes whatever is left
 193 * @hw_submit: pushes active descriptor(s) to the hardware
 194 * @hw_interrupt: handle the interrupt
 195 * @num_channels: number of channels for this instance
 196 * @channels: array of channels
 197 *
 198 * There is one instance of this struct for the M2P channels and one for the
 199 * M2M channels. hw_xxx() methods are used to perform operations which are
 200 * different on M2M and M2P channels. These methods are called with channel
 201 * lock held and interrupts disabled so they cannot sleep.
 202 */
 203struct ep93xx_dma_engine {
 204	struct dma_device	dma_dev;
 205	bool			m2m;
 206	int			(*hw_setup)(struct ep93xx_dma_chan *);
 207	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
 208	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
 209	void			(*hw_submit)(struct ep93xx_dma_chan *);
 210	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
 211#define INTERRUPT_UNKNOWN	0
 212#define INTERRUPT_DONE		1
 213#define INTERRUPT_NEXT_BUFFER	2
 214
 215	size_t			num_channels;
 216	struct ep93xx_dma_chan	channels[] __counted_by(num_channels);
 217};
 218
 219static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
 220{
 221	return &edmac->chan.dev->device;
 222}
 223
 224static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
 225{
 226	return container_of(chan, struct ep93xx_dma_chan, chan);
 227}
 228
 229/**
 230 * ep93xx_dma_set_active - set new active descriptor chain
 231 * @edmac: channel
 232 * @desc: head of the new active descriptor chain
 233 *
 234 * Sets @desc to be the head of the new active descriptor chain. This is the
 235 * chain which is processed next. The active list must be empty before calling
 236 * this function.
 237 *
 238 * Called with @edmac->lock held and interrupts disabled.
 239 */
 240static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
 241				  struct ep93xx_dma_desc *desc)
 242{
 243	BUG_ON(!list_empty(&edmac->active));
 244
 245	list_add_tail(&desc->node, &edmac->active);
 246
 247	/* Flatten the @desc->tx_list chain into @edmac->active list */
 248	while (!list_empty(&desc->tx_list)) {
 249		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
 250			struct ep93xx_dma_desc, node);
 251
 252		/*
 253		 * We copy the callback parameters from the first descriptor
 254		 * to all the chained descriptors. This way we can call the
 255		 * callback without having to find out the first descriptor in
 256		 * the chain. Useful for cyclic transfers.
 257		 */
 258		d->txd.callback = desc->txd.callback;
 259		d->txd.callback_param = desc->txd.callback_param;
 260
 261		list_move_tail(&d->node, &edmac->active);
 262	}
 263}
 264
 265/* Called with @edmac->lock held and interrupts disabled */
 266static struct ep93xx_dma_desc *
 267ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 268{
 269	return list_first_entry_or_null(&edmac->active,
 270					struct ep93xx_dma_desc, node);
 271}
 272
 273/**
 274 * ep93xx_dma_advance_active - advances to the next active descriptor
 275 * @edmac: channel
 276 *
  277 * Function advances the active descriptor to the next one in @edmac->active
  278 * and returns %true if we still have descriptors in the chain to process.
  279 * Otherwise returns %false.
  280 *
  281 * When the channel is in cyclic mode, always returns %true.
 282 *
 283 * Called with @edmac->lock held and interrupts disabled.
 284 */
 285static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
 286{
 287	struct ep93xx_dma_desc *desc;
 288
 289	list_rotate_left(&edmac->active);
 290
 291	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 292		return true;
 293
 294	desc = ep93xx_dma_get_active(edmac);
 295	if (!desc)
 296		return false;
 297
 298	/*
 299	 * If txd.cookie is set it means that we are back in the first
 300	 * descriptor in the chain and hence done with it.
 301	 */
 302	return !desc->txd.cookie;
 303}
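
/*
 * Example of the rotation above: if @active holds [d0, d1, d2], where only
 * d0 (the chain head) has txd.cookie set, successive calls return
 * [d1, d2, d0] -> %true, [d2, d0, d1] -> %true, [d0, d1, d2] -> %false,
 * i.e. the chain is reported done once the head comes around again. In
 * cyclic mode the rotation simply continues forever.
 */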
 304
 305/*
 306 * M2P DMA implementation
 307 */
 308
 309static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
 310{
 311	writel(control, edmac->regs + M2P_CONTROL);
 312	/*
 313	 * EP93xx User's Guide states that we must perform a dummy read after
 314	 * write to the control register.
 315	 */
 316	readl(edmac->regs + M2P_CONTROL);
 317}
 318
 319static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
 320{
 321	struct ep93xx_dma_data *data = edmac->chan.private;
 322	u32 control;
 323
 324	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
 325
 326	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
 327		| M2P_CONTROL_ENABLE;
 328	m2p_set_control(edmac, control);
 329
 330	edmac->buffer = 0;
 331
 332	return 0;
 333}
 334
 335static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
 336{
 337	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 338}
 339
 340static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 341{
 342	unsigned long flags;
 343	u32 control;
 344
 345	spin_lock_irqsave(&edmac->lock, flags);
 346	control = readl(edmac->regs + M2P_CONTROL);
 347	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 348	m2p_set_control(edmac, control);
 349	spin_unlock_irqrestore(&edmac->lock, flags);
 350
 351	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
 352		schedule();
 353}
 354
 355static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
 356{
 357	m2p_set_control(edmac, 0);
 358
 359	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
 360		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 361}
 362
 363static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
 364{
 365	struct ep93xx_dma_desc *desc;
 366	u32 bus_addr;
 367
 368	desc = ep93xx_dma_get_active(edmac);
 369	if (!desc) {
 370		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
 371		return;
 372	}
 373
 374	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
 375		bus_addr = desc->src_addr;
 376	else
 377		bus_addr = desc->dst_addr;
 378
 379	if (edmac->buffer == 0) {
 380		writel(desc->size, edmac->regs + M2P_MAXCNT0);
 381		writel(bus_addr, edmac->regs + M2P_BASE0);
 382	} else {
 383		writel(desc->size, edmac->regs + M2P_MAXCNT1);
 384		writel(bus_addr, edmac->regs + M2P_BASE1);
 385	}
 386
 387	edmac->buffer ^= 1;
 388}
 389
 390static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
 391{
 392	u32 control = readl(edmac->regs + M2P_CONTROL);
 393
 394	m2p_fill_desc(edmac);
 395	control |= M2P_CONTROL_STALLINT;
 396
 397	if (ep93xx_dma_advance_active(edmac)) {
 398		m2p_fill_desc(edmac);
 399		control |= M2P_CONTROL_NFBINT;
 400	}
 401
 402	m2p_set_control(edmac, control);
 403}
 404
 405static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
 406{
 407	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
 408	u32 control;
 409
 410	if (irq_status & M2P_INTERRUPT_ERROR) {
 411		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
 412
 413		/* Clear the error interrupt */
 414		writel(1, edmac->regs + M2P_INTERRUPT);
 415
 416		/*
  417		 * It seems that there is no easy way of reporting errors back
  418		 * to the client so we just report the error here and continue as
 419		 * usual.
 420		 *
 421		 * Revisit this when there is a mechanism to report back the
 422		 * errors.
 423		 */
 424		dev_err(chan2dev(edmac),
 425			"DMA transfer failed! Details:\n"
 426			"\tcookie	: %d\n"
 427			"\tsrc_addr	: 0x%08x\n"
 428			"\tdst_addr	: 0x%08x\n"
 429			"\tsize		: %zu\n",
 430			desc->txd.cookie, desc->src_addr, desc->dst_addr,
 431			desc->size);
 432	}
 433
 434	/*
  435	 * Even the latest E2 silicon revision sometimes asserts the STALL
  436	 * interrupt instead of NFB. Therefore we treat them equally, based on
  437	 * the amount of data we still have to transfer.
 438	 */
 439	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
 440		return INTERRUPT_UNKNOWN;
 441
 442	if (ep93xx_dma_advance_active(edmac)) {
 443		m2p_fill_desc(edmac);
 444		return INTERRUPT_NEXT_BUFFER;
 445	}
 446
 447	/* Disable interrupts */
 448	control = readl(edmac->regs + M2P_CONTROL);
 449	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 450	m2p_set_control(edmac, control);
 451
 452	return INTERRUPT_DONE;
 453}
 454
 455/*
 456 * M2M DMA implementation
 457 */
 458
 459static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
 460{
 461	const struct ep93xx_dma_data *data = edmac->chan.private;
 462	u32 control = 0;
 463
 464	if (!data) {
 465		/* This is memcpy channel, nothing to configure */
 466		writel(control, edmac->regs + M2M_CONTROL);
 467		return 0;
 468	}
 469
 470	switch (data->port) {
 471	case EP93XX_DMA_SSP:
 472		/*
  473		 * This was found by experimenting - anything less than 5
  474		 * causes the channel to perform only a partial transfer, which
  475		 * leads to problems since we don't get the DONE interrupt then.
 476		 */
 477		control = (5 << M2M_CONTROL_PWSC_SHIFT);
 478		control |= M2M_CONTROL_NO_HDSK;
 479
 480		if (data->direction == DMA_MEM_TO_DEV) {
 481			control |= M2M_CONTROL_DAH;
 482			control |= M2M_CONTROL_TM_TX;
 483			control |= M2M_CONTROL_RSS_SSPTX;
 484		} else {
 485			control |= M2M_CONTROL_SAH;
 486			control |= M2M_CONTROL_TM_RX;
 487			control |= M2M_CONTROL_RSS_SSPRX;
 488		}
 489		break;
 490
 491	case EP93XX_DMA_IDE:
 492		/*
 493		 * This IDE part is totally untested. Values below are taken
  494		 * from the EP93xx User's Guide and might not be correct.
 495		 */
 496		if (data->direction == DMA_MEM_TO_DEV) {
 497			/* Worst case from the UG */
 498			control = (3 << M2M_CONTROL_PWSC_SHIFT);
 499			control |= M2M_CONTROL_DAH;
 500			control |= M2M_CONTROL_TM_TX;
 501		} else {
 502			control = (2 << M2M_CONTROL_PWSC_SHIFT);
 503			control |= M2M_CONTROL_SAH;
 504			control |= M2M_CONTROL_TM_RX;
 505		}
 506
 507		control |= M2M_CONTROL_NO_HDSK;
 508		control |= M2M_CONTROL_RSS_IDE;
 509		control |= M2M_CONTROL_PW_16;
 510		break;
 511
 512	default:
 513		return -EINVAL;
 514	}
 515
 516	writel(control, edmac->regs + M2M_CONTROL);
 517	return 0;
 518}
 519
 520static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
 521{
 522	/* Just disable the channel */
 523	writel(0, edmac->regs + M2M_CONTROL);
 524}
 525
 526static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
 527{
 528	struct ep93xx_dma_desc *desc;
 529
 530	desc = ep93xx_dma_get_active(edmac);
 531	if (!desc) {
 532		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
 533		return;
 534	}
 535
 536	if (edmac->buffer == 0) {
 537		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
 538		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
 539		writel(desc->size, edmac->regs + M2M_BCR0);
 540	} else {
 541		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
 542		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
 543		writel(desc->size, edmac->regs + M2M_BCR1);
 544	}
 545
 546	edmac->buffer ^= 1;
 547}
 548
 549static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
 550{
 551	struct ep93xx_dma_data *data = edmac->chan.private;
 552	u32 control = readl(edmac->regs + M2M_CONTROL);
 553
 554	/*
  555	 * Since we allow clients to configure PW (peripheral width) we always
  556	 * clear the PW bits here and then set them according to what is given
  557	 * in the runtime configuration.
 558	 */
 559	control &= ~M2M_CONTROL_PW_MASK;
 560	control |= edmac->runtime_ctrl;
 561
 562	m2m_fill_desc(edmac);
 563	control |= M2M_CONTROL_DONEINT;
 564
 565	if (ep93xx_dma_advance_active(edmac)) {
 566		m2m_fill_desc(edmac);
 567		control |= M2M_CONTROL_NFBINT;
 568	}
 569
 570	/*
 571	 * Now we can finally enable the channel. For M2M channel this must be
 572	 * done _after_ the BCRx registers are programmed.
 573	 */
 574	control |= M2M_CONTROL_ENABLE;
 575	writel(control, edmac->regs + M2M_CONTROL);
 576
 577	if (!data) {
 578		/*
 579		 * For memcpy channels the software trigger must be asserted
 580		 * in order to start the memcpy operation.
 581		 */
 582		control |= M2M_CONTROL_START;
 583		writel(control, edmac->regs + M2M_CONTROL);
 584	}
 585}
 586
 587/*
 588 * According to EP93xx User's Guide, we should receive DONE interrupt when all
 589 * M2M DMA controller transactions complete normally. This is not always the
 590 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
 591 * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
 592 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
  593 * In effect, disabling the channel when only the DONE bit is set could stop
  594 * a currently running DMA transfer. To avoid this, we use the Buffer FSM and
  595 * Control FSM to check the current state of the DMA channel.
 596 */
 597static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
 598{
 599	u32 status = readl(edmac->regs + M2M_STATUS);
 600	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
 601	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
 602	bool done = status & M2M_STATUS_DONE;
 603	bool last_done;
 604	u32 control;
 605	struct ep93xx_dma_desc *desc;
 606
 607	/* Accept only DONE and NFB interrupts */
 608	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
 609		return INTERRUPT_UNKNOWN;
 610
 611	if (done) {
 612		/* Clear the DONE bit */
 613		writel(0, edmac->regs + M2M_INTERRUPT);
 614	}
 615
 616	/*
 617	 * Check whether we are done with descriptors or not. This, together
 618	 * with DMA channel state, determines action to take in interrupt.
 619	 */
 620	desc = ep93xx_dma_get_active(edmac);
 621	last_done = !desc || desc->txd.cookie;
 622
 623	/*
 624	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
 625	 * DMA channel. Using DONE and NFB bits from channel status register
 626	 * or bits from channel interrupt register is not reliable.
 627	 */
 628	if (!last_done &&
 629	    (buf_fsm == M2M_STATUS_BUF_NO ||
 630	     buf_fsm == M2M_STATUS_BUF_ON)) {
 631		/*
 632		 * Two buffers are ready for update when Buffer FSM is in
 633		 * DMA_NO_BUF state. Only one buffer can be prepared without
 634		 * disabling the channel or polling the DONE bit.
 635		 * To simplify things, always prepare only one buffer.
 636		 */
 637		if (ep93xx_dma_advance_active(edmac)) {
 638			m2m_fill_desc(edmac);
 639			if (done && !edmac->chan.private) {
 640				/* Software trigger for memcpy channel */
 641				control = readl(edmac->regs + M2M_CONTROL);
 642				control |= M2M_CONTROL_START;
 643				writel(control, edmac->regs + M2M_CONTROL);
 644			}
 645			return INTERRUPT_NEXT_BUFFER;
 646		} else {
 647			last_done = true;
 648		}
 649	}
 650
 651	/*
 652	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
 653	 * and Control FSM is in DMA_STALL state.
 654	 */
 655	if (last_done &&
 656	    buf_fsm == M2M_STATUS_BUF_NO &&
 657	    ctl_fsm == M2M_STATUS_CTL_STALL) {
 658		/* Disable interrupts and the channel */
 659		control = readl(edmac->regs + M2M_CONTROL);
 660		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
 661			    | M2M_CONTROL_ENABLE);
 662		writel(control, edmac->regs + M2M_CONTROL);
 663		return INTERRUPT_DONE;
 664	}
 665
 666	/*
 667	 * Nothing to do this time.
 668	 */
 669	return INTERRUPT_NEXT_BUFFER;
 670}
 671
 672/*
 673 * DMA engine API implementation
 674 */
 675
 676static struct ep93xx_dma_desc *
 677ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
 678{
 679	struct ep93xx_dma_desc *desc, *_desc;
 680	struct ep93xx_dma_desc *ret = NULL;
 681	unsigned long flags;
 682
 683	spin_lock_irqsave(&edmac->lock, flags);
 684	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
 685		if (async_tx_test_ack(&desc->txd)) {
 686			list_del_init(&desc->node);
 687
 688			/* Re-initialize the descriptor */
 689			desc->src_addr = 0;
 690			desc->dst_addr = 0;
 691			desc->size = 0;
 692			desc->complete = false;
 693			desc->txd.cookie = 0;
 694			desc->txd.callback = NULL;
 695			desc->txd.callback_param = NULL;
 696
 697			ret = desc;
 698			break;
 699		}
 700	}
 701	spin_unlock_irqrestore(&edmac->lock, flags);
 702	return ret;
 703}
 704
 705static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
 706				struct ep93xx_dma_desc *desc)
 707{
 708	if (desc) {
 709		unsigned long flags;
 710
 711		spin_lock_irqsave(&edmac->lock, flags);
 712		list_splice_init(&desc->tx_list, &edmac->free_list);
 713		list_add(&desc->node, &edmac->free_list);
 714		spin_unlock_irqrestore(&edmac->lock, flags);
 715	}
 716}
 717
 718/**
 719 * ep93xx_dma_advance_work - start processing the next pending transaction
 720 * @edmac: channel
 721 *
 722 * If we have pending transactions queued and we are currently idling, this
 723 * function takes the next queued transaction from the @edmac->queue and
 724 * pushes it to the hardware for execution.
 725 */
 726static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
 727{
 728	struct ep93xx_dma_desc *new;
 729	unsigned long flags;
 730
 731	spin_lock_irqsave(&edmac->lock, flags);
 732	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
 733		spin_unlock_irqrestore(&edmac->lock, flags);
 734		return;
 735	}
 736
 737	/* Take the next descriptor from the pending queue */
 738	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
 739	list_del_init(&new->node);
 740
 741	ep93xx_dma_set_active(edmac, new);
 742
 743	/* Push it to the hardware */
 744	edmac->edma->hw_submit(edmac);
 745	spin_unlock_irqrestore(&edmac->lock, flags);
 746}
 747
 748static void ep93xx_dma_tasklet(struct tasklet_struct *t)
 749{
 750	struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
 751	struct ep93xx_dma_desc *desc, *d;
 752	struct dmaengine_desc_callback cb;
 753	LIST_HEAD(list);
 754
 755	memset(&cb, 0, sizeof(cb));
 756	spin_lock_irq(&edmac->lock);
 757	/*
 758	 * If dma_terminate_all() was called before we get to run, the active
 759	 * list has become empty. If that happens we aren't supposed to do
 760	 * anything more than call ep93xx_dma_advance_work().
 761	 */
 762	desc = ep93xx_dma_get_active(edmac);
 763	if (desc) {
 764		if (desc->complete) {
 765			/* mark descriptor complete for non cyclic case only */
 766			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 767				dma_cookie_complete(&desc->txd);
 768			list_splice_init(&edmac->active, &list);
 769		}
 770		dmaengine_desc_get_callback(&desc->txd, &cb);
 771	}
 772	spin_unlock_irq(&edmac->lock);
 773
 774	/* Pick up the next descriptor from the queue */
 775	ep93xx_dma_advance_work(edmac);
 776
 777	/* Now we can release all the chained descriptors */
 778	list_for_each_entry_safe(desc, d, &list, node) {
 779		dma_descriptor_unmap(&desc->txd);
 780		ep93xx_dma_desc_put(edmac, desc);
 781	}
 782
 783	dmaengine_desc_callback_invoke(&cb, NULL);
 784}
 785
 786static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
 787{
 788	struct ep93xx_dma_chan *edmac = dev_id;
 789	struct ep93xx_dma_desc *desc;
 790	irqreturn_t ret = IRQ_HANDLED;
 791
 792	spin_lock(&edmac->lock);
 793
 794	desc = ep93xx_dma_get_active(edmac);
 795	if (!desc) {
 796		dev_warn(chan2dev(edmac),
 797			 "got interrupt while active list is empty\n");
 798		spin_unlock(&edmac->lock);
 799		return IRQ_NONE;
 800	}
 801
 802	switch (edmac->edma->hw_interrupt(edmac)) {
 803	case INTERRUPT_DONE:
 804		desc->complete = true;
 805		tasklet_schedule(&edmac->tasklet);
 806		break;
 807
 808	case INTERRUPT_NEXT_BUFFER:
 809		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 810			tasklet_schedule(&edmac->tasklet);
 811		break;
 812
 813	default:
 814		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
 815		ret = IRQ_NONE;
 816		break;
 817	}
 818
 819	spin_unlock(&edmac->lock);
 820	return ret;
 821}
 822
 823/**
 824 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 825 * @tx: descriptor to be executed
 826 *
  827 * Function will execute the given descriptor on the hardware or, if the
  828 * hardware is busy, queue the descriptor to be executed later. Returns a
  829 * cookie which can be used to poll the status of the descriptor.
 830 */
 831static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 832{
 833	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
 834	struct ep93xx_dma_desc *desc;
 835	dma_cookie_t cookie;
 836	unsigned long flags;
 837
 838	spin_lock_irqsave(&edmac->lock, flags);
 839	cookie = dma_cookie_assign(tx);
 840
 841	desc = container_of(tx, struct ep93xx_dma_desc, txd);
 842
 843	/*
  844	 * If nothing is currently being processed, we push this descriptor
  845	 * directly to the hardware. Otherwise we put the descriptor
  846	 * into the pending queue.
 847	 */
 848	if (list_empty(&edmac->active)) {
 849		ep93xx_dma_set_active(edmac, desc);
 850		edmac->edma->hw_submit(edmac);
 851	} else {
 852		list_add_tail(&desc->node, &edmac->queue);
 853	}
 854
 855	spin_unlock_irqrestore(&edmac->lock, flags);
 856	return cookie;
 857}
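
/*
 * From the client side this submit path is reached through the generic
 * dmaengine helpers. A hedged sketch (error handling trimmed):
 *
 *	cookie = dmaengine_submit(txd);
 *	if (dma_submit_error(cookie))
 *		return -EINVAL;
 *	dma_async_issue_pending(chan);
 */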
 858
 859/**
 860 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 861 * @chan: channel to allocate resources
 862 *
  863 * Function allocates the necessary resources for the given DMA channel and
  864 * returns the number of allocated descriptors for the channel. A negative
  865 * errno is returned in case of failure.
 866 */
 867static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
 868{
 869	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 870	struct ep93xx_dma_data *data = chan->private;
 871	const char *name = dma_chan_name(chan);
 872	int ret, i;
 873
 874	/* Sanity check the channel parameters */
 875	if (!edmac->edma->m2m) {
 876		if (!data)
 877			return -EINVAL;
 878		if (data->port < EP93XX_DMA_I2S1 ||
 879		    data->port > EP93XX_DMA_IRDA)
 880			return -EINVAL;
 881		if (data->direction != ep93xx_dma_chan_direction(chan))
 882			return -EINVAL;
 883	} else {
 884		if (data) {
 885			switch (data->port) {
 886			case EP93XX_DMA_SSP:
 887			case EP93XX_DMA_IDE:
 888				if (!is_slave_direction(data->direction))
 889					return -EINVAL;
 890				break;
 891			default:
 892				return -EINVAL;
 893			}
 894		}
 895	}
 896
 897	if (data && data->name)
 898		name = data->name;
 899
 900	ret = clk_prepare_enable(edmac->clk);
 901	if (ret)
 902		return ret;
 903
 904	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
 905	if (ret)
 906		goto fail_clk_disable;
 907
 908	spin_lock_irq(&edmac->lock);
 909	dma_cookie_init(&edmac->chan);
 910	ret = edmac->edma->hw_setup(edmac);
 911	spin_unlock_irq(&edmac->lock);
 912
 913	if (ret)
 914		goto fail_free_irq;
 915
 916	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
 917		struct ep93xx_dma_desc *desc;
 918
 919		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 920		if (!desc) {
 921			dev_warn(chan2dev(edmac), "not enough descriptors\n");
 922			break;
 923		}
 924
 925		INIT_LIST_HEAD(&desc->tx_list);
 926
 927		dma_async_tx_descriptor_init(&desc->txd, chan);
 928		desc->txd.flags = DMA_CTRL_ACK;
 929		desc->txd.tx_submit = ep93xx_dma_tx_submit;
 930
 931		ep93xx_dma_desc_put(edmac, desc);
 932	}
 933
 934	return i;
 935
 936fail_free_irq:
 937	free_irq(edmac->irq, edmac);
 938fail_clk_disable:
 939	clk_disable_unprepare(edmac->clk);
 940
 941	return ret;
 942}
 943
 944/**
 945 * ep93xx_dma_free_chan_resources - release resources for the channel
 946 * @chan: channel
 947 *
 948 * Function releases all the resources allocated for the given channel.
 949 * The channel must be idle when this is called.
 950 */
 951static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
 952{
 953	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 954	struct ep93xx_dma_desc *desc, *d;
 955	unsigned long flags;
 956	LIST_HEAD(list);
 957
 958	BUG_ON(!list_empty(&edmac->active));
 959	BUG_ON(!list_empty(&edmac->queue));
 960
 961	spin_lock_irqsave(&edmac->lock, flags);
 962	edmac->edma->hw_shutdown(edmac);
 963	edmac->runtime_addr = 0;
 964	edmac->runtime_ctrl = 0;
 965	edmac->buffer = 0;
 966	list_splice_init(&edmac->free_list, &list);
 967	spin_unlock_irqrestore(&edmac->lock, flags);
 968
 969	list_for_each_entry_safe(desc, d, &list, node)
 970		kfree(desc);
 971
 972	clk_disable_unprepare(edmac->clk);
 973	free_irq(edmac->irq, edmac);
 974}
 975
 976/**
 977 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 978 * @chan: channel
 979 * @dest: destination bus address
 980 * @src: source bus address
 981 * @len: size of the transaction
 982 * @flags: flags for the descriptor
 983 *
 984 * Returns a valid DMA descriptor or %NULL in case of failure.
 985 */
 986static struct dma_async_tx_descriptor *
 987ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 988			   dma_addr_t src, size_t len, unsigned long flags)
 989{
 990	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 991	struct ep93xx_dma_desc *desc, *first;
 992	size_t bytes, offset;
 993
 994	first = NULL;
 995	for (offset = 0; offset < len; offset += bytes) {
 996		desc = ep93xx_dma_desc_get(edmac);
 997		if (!desc) {
 998			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
 999			goto fail;
1000		}
1001
1002		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
1003
1004		desc->src_addr = src + offset;
1005		desc->dst_addr = dest + offset;
1006		desc->size = bytes;
1007
1008		if (!first)
1009			first = desc;
1010		else
1011			list_add_tail(&desc->node, &first->tx_list);
1012	}
1013
1014	first->txd.cookie = -EBUSY;
1015	first->txd.flags = flags;
1016
1017	return &first->txd;
1018fail:
1019	ep93xx_dma_desc_put(edmac, first);
1020	return NULL;
1021}
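
/*
 * A minimal memcpy usage sketch (hedged; assumes @chan was requested with
 * the DMA_MEMCPY capability and @src/@dst are DMA-mapped bus addresses):
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *
 * (alternatively, wait for the descriptor callback instead of polling)
 */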
1022
1023/**
1024 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
1025 * @chan: channel
1026 * @sgl: list of buffers to transfer
1027 * @sg_len: number of entries in @sgl
 1028 * @dir: direction of the DMA transfer
1029 * @flags: flags for the descriptor
1030 * @context: operation context (ignored)
1031 *
1032 * Returns a valid DMA descriptor or %NULL in case of failure.
1033 */
1034static struct dma_async_tx_descriptor *
1035ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1036			 unsigned int sg_len, enum dma_transfer_direction dir,
1037			 unsigned long flags, void *context)
1038{
1039	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1040	struct ep93xx_dma_desc *desc, *first;
1041	struct scatterlist *sg;
1042	int i;
1043
1044	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1045		dev_warn(chan2dev(edmac),
1046			 "channel was configured with different direction\n");
1047		return NULL;
1048	}
1049
1050	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1051		dev_warn(chan2dev(edmac),
1052			 "channel is already used for cyclic transfers\n");
1053		return NULL;
1054	}
1055
1056	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
1057
1058	first = NULL;
1059	for_each_sg(sgl, sg, sg_len, i) {
1060		size_t len = sg_dma_len(sg);
1061
1062		if (len > DMA_MAX_CHAN_BYTES) {
1063			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
1064				 len);
1065			goto fail;
1066		}
1067
1068		desc = ep93xx_dma_desc_get(edmac);
1069		if (!desc) {
1070			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1071			goto fail;
1072		}
1073
1074		if (dir == DMA_MEM_TO_DEV) {
1075			desc->src_addr = sg_dma_address(sg);
1076			desc->dst_addr = edmac->runtime_addr;
1077		} else {
1078			desc->src_addr = edmac->runtime_addr;
1079			desc->dst_addr = sg_dma_address(sg);
1080		}
1081		desc->size = len;
1082
1083		if (!first)
1084			first = desc;
1085		else
1086			list_add_tail(&desc->node, &first->tx_list);
1087	}
1088
1089	first->txd.cookie = -EBUSY;
1090	first->txd.flags = flags;
1091
1092	return &first->txd;
1093
1094fail:
1095	ep93xx_dma_desc_put(edmac, first);
1096	return NULL;
1097}
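
/*
 * Slave usage sketch (hedged; assumes dmaengine_slave_config() has been
 * called so that @runtime_addr/@runtime_ctrl are valid, and @dev is the
 * client's device):
 *
 *	nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -ENOMEM;
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		goto unmap;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */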
1098
1099/**
1100 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
1101 * @chan: channel
1102 * @dma_addr: DMA mapped address of the buffer
1103 * @buf_len: length of the buffer (in bytes)
1104 * @period_len: length of a single period
1105 * @dir: direction of the operation
1106 * @flags: tx descriptor status flags
1107 *
 1108 * Prepares a descriptor for a cyclic DMA operation. This means that once the
 1109 * descriptor is submitted, we will be submitting @period_len sized buffers
 1110 * and calling the callback once each period has elapsed. The transfer
 1111 * terminates only when the client calls dmaengine_terminate_all() for this
 1112 * channel.
1113 *
1114 * Returns a valid DMA descriptor or %NULL in case of failure.
1115 */
1116static struct dma_async_tx_descriptor *
1117ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1118			   size_t buf_len, size_t period_len,
1119			   enum dma_transfer_direction dir, unsigned long flags)
1120{
1121	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1122	struct ep93xx_dma_desc *desc, *first;
1123	size_t offset = 0;
1124
1125	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1126		dev_warn(chan2dev(edmac),
1127			 "channel was configured with different direction\n");
1128		return NULL;
1129	}
1130
1131	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1132		dev_warn(chan2dev(edmac),
1133			 "channel is already used for cyclic transfers\n");
1134		return NULL;
1135	}
1136
1137	if (period_len > DMA_MAX_CHAN_BYTES) {
1138		dev_warn(chan2dev(edmac), "too big period length %zu\n",
1139			 period_len);
1140		return NULL;
1141	}
1142
1143	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
1144
1145	/* Split the buffer into period size chunks */
1146	first = NULL;
1147	for (offset = 0; offset < buf_len; offset += period_len) {
1148		desc = ep93xx_dma_desc_get(edmac);
1149		if (!desc) {
1150			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1151			goto fail;
1152		}
1153
1154		if (dir == DMA_MEM_TO_DEV) {
1155			desc->src_addr = dma_addr + offset;
1156			desc->dst_addr = edmac->runtime_addr;
1157		} else {
1158			desc->src_addr = edmac->runtime_addr;
1159			desc->dst_addr = dma_addr + offset;
1160		}
1161
1162		desc->size = period_len;
1163
1164		if (!first)
1165			first = desc;
1166		else
1167			list_add_tail(&desc->node, &first->tx_list);
1168	}
1169
1170	first->txd.cookie = -EBUSY;
1171
1172	return &first->txd;
1173
1174fail:
1175	ep93xx_dma_desc_put(edmac, first);
1176	return NULL;
1177}
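
/*
 * Cyclic usage sketch (hedged; the typical audio case, with a callback
 * invoked once per period - the callback and param names are hypothetical):
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_period_elapsed;
 *	txd->callback_param = my_data;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */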
1178
1179/**
1180 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
1181 * current context.
1182 * @chan: channel
1183 *
1184 * Synchronizes the DMA channel termination to the current context. When this
1185 * function returns it is guaranteed that all transfers for previously issued
1186 * descriptors have stopped and it is safe to free the memory associated
1187 * with them. Furthermore it is guaranteed that all complete callback functions
1188 * for a previously submitted descriptor have finished running and it is safe to
1189 * free resources accessed from within the complete callbacks.
1190 */
1191static void ep93xx_dma_synchronize(struct dma_chan *chan)
1192{
1193	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1194
1195	if (edmac->edma->hw_synchronize)
1196		edmac->edma->hw_synchronize(edmac);
1197}
1198
1199/**
1200 * ep93xx_dma_terminate_all - terminate all transactions
1201 * @chan: channel
1202 *
1203 * Stops all DMA transactions. All descriptors are put back to the
1204 * @edmac->free_list and callbacks are _not_ called.
1205 */
1206static int ep93xx_dma_terminate_all(struct dma_chan *chan)
1207{
1208	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1209	struct ep93xx_dma_desc *desc, *_d;
1210	unsigned long flags;
1211	LIST_HEAD(list);
1212
1213	spin_lock_irqsave(&edmac->lock, flags);
1214	/* First we disable and flush the DMA channel */
1215	edmac->edma->hw_shutdown(edmac);
1216	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
1217	list_splice_init(&edmac->active, &list);
1218	list_splice_init(&edmac->queue, &list);
1219	/*
1220	 * We then re-enable the channel. This way we can continue submitting
1221	 * the descriptors by just calling ->hw_submit() again.
1222	 */
1223	edmac->edma->hw_setup(edmac);
1224	spin_unlock_irqrestore(&edmac->lock, flags);
1225
1226	list_for_each_entry_safe(desc, _d, &list, node)
1227		ep93xx_dma_desc_put(edmac, desc);
1228
1229	return 0;
1230}
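
/*
 * Since callbacks are _not_ called for terminated descriptors, a client
 * that frees resources used by its callbacks should synchronize first; a
 * sketch using the generic helpers:
 *
 *	dmaengine_terminate_async(chan);
 *	dmaengine_synchronize(chan);
 *
 * or simply dmaengine_terminate_sync(chan), which combines the two.
 */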
1231
1232static int ep93xx_dma_slave_config(struct dma_chan *chan,
1233				   struct dma_slave_config *config)
1234{
1235	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1236
1237	memcpy(&edmac->slave_config, config, sizeof(*config));
1238
1239	return 0;
1240}
1241
1242static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
1243					 enum dma_transfer_direction dir,
1244					 struct dma_slave_config *config)
1245{
1246	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1247	enum dma_slave_buswidth width;
1248	unsigned long flags;
1249	u32 addr, ctrl;
1250
1251	if (!edmac->edma->m2m)
1252		return -EINVAL;
1253
1254	switch (dir) {
1255	case DMA_DEV_TO_MEM:
1256		width = config->src_addr_width;
1257		addr = config->src_addr;
1258		break;
1259
1260	case DMA_MEM_TO_DEV:
1261		width = config->dst_addr_width;
1262		addr = config->dst_addr;
1263		break;
1264
1265	default:
1266		return -EINVAL;
1267	}
1268
1269	switch (width) {
1270	case DMA_SLAVE_BUSWIDTH_1_BYTE:
1271		ctrl = 0;
1272		break;
1273	case DMA_SLAVE_BUSWIDTH_2_BYTES:
1274		ctrl = M2M_CONTROL_PW_16;
1275		break;
1276	case DMA_SLAVE_BUSWIDTH_4_BYTES:
1277		ctrl = M2M_CONTROL_PW_32;
1278		break;
1279	default:
1280		return -EINVAL;
1281	}
1282
1283	spin_lock_irqsave(&edmac->lock, flags);
1284	edmac->runtime_addr = addr;
1285	edmac->runtime_ctrl = ctrl;
1286	spin_unlock_irqrestore(&edmac->lock, flags);
1287
1288	return 0;
1289}
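
/*
 * Example of a configuration this consumes (a sketch; the FIFO address
 * macro is hypothetical). Note that only the address and bus width for
 * the prepared direction are used here; other fields are ignored:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = MY_SSP_DR_PHYS,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */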
1290
1291/**
1292 * ep93xx_dma_tx_status - check if a transaction is completed
1293 * @chan: channel
1294 * @cookie: transaction specific cookie
1295 * @state: state of the transaction is stored here if given
1296 *
1297 * This function can be used to query state of a given transaction.
1298 */
1299static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1300					    dma_cookie_t cookie,
1301					    struct dma_tx_state *state)
1302{
1303	return dma_cookie_status(chan, cookie, state);
1304}
1305
1306/**
1307 * ep93xx_dma_issue_pending - push pending transactions to the hardware
1308 * @chan: channel
1309 *
1310 * When this function is called, all pending transactions are pushed to the
1311 * hardware and executed.
1312 */
1313static void ep93xx_dma_issue_pending(struct dma_chan *chan)
1314{
1315	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
1316}
1317
1318static int __init ep93xx_dma_probe(struct platform_device *pdev)
1319{
1320	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1321	struct ep93xx_dma_engine *edma;
1322	struct dma_device *dma_dev;
1323	int ret, i;
1324
1325	edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL);
1326	if (!edma)
1327		return -ENOMEM;
1328
1329	dma_dev = &edma->dma_dev;
1330	edma->m2m = platform_get_device_id(pdev)->driver_data;
1331	edma->num_channels = pdata->num_channels;
1332
1333	INIT_LIST_HEAD(&dma_dev->channels);
1334	for (i = 0; i < pdata->num_channels; i++) {
1335		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
1336		struct ep93xx_dma_chan *edmac = &edma->channels[i];
1337
1338		edmac->chan.device = dma_dev;
1339		edmac->regs = cdata->base;
1340		edmac->irq = cdata->irq;
1341		edmac->edma = edma;
1342
1343		edmac->clk = clk_get(NULL, cdata->name);
1344		if (IS_ERR(edmac->clk)) {
1345			dev_warn(&pdev->dev, "failed to get clock for %s\n",
1346				 cdata->name);
1347			continue;
1348		}
1349
1350		spin_lock_init(&edmac->lock);
1351		INIT_LIST_HEAD(&edmac->active);
1352		INIT_LIST_HEAD(&edmac->queue);
1353		INIT_LIST_HEAD(&edmac->free_list);
1354		tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);
1355
1356		list_add_tail(&edmac->chan.device_node,
1357			      &dma_dev->channels);
1358	}
1359
1360	dma_cap_zero(dma_dev->cap_mask);
1361	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1362	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
1363
1364	dma_dev->dev = &pdev->dev;
1365	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
1366	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1367	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1368	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1369	dma_dev->device_config = ep93xx_dma_slave_config;
1370	dma_dev->device_synchronize = ep93xx_dma_synchronize;
1371	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1372	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1373	dma_dev->device_tx_status = ep93xx_dma_tx_status;
1374
1375	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
1376
1377	if (edma->m2m) {
1378		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1379		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
1380
1381		edma->hw_setup = m2m_hw_setup;
1382		edma->hw_shutdown = m2m_hw_shutdown;
1383		edma->hw_submit = m2m_hw_submit;
1384		edma->hw_interrupt = m2m_hw_interrupt;
1385	} else {
1386		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1387
1388		edma->hw_synchronize = m2p_hw_synchronize;
1389		edma->hw_setup = m2p_hw_setup;
1390		edma->hw_shutdown = m2p_hw_shutdown;
1391		edma->hw_submit = m2p_hw_submit;
1392		edma->hw_interrupt = m2p_hw_interrupt;
1393	}
1394
1395	ret = dma_async_device_register(dma_dev);
1396	if (unlikely(ret)) {
1397		for (i = 0; i < edma->num_channels; i++) {
1398			struct ep93xx_dma_chan *edmac = &edma->channels[i];
1399			if (!IS_ERR_OR_NULL(edmac->clk))
1400				clk_put(edmac->clk);
1401		}
1402		kfree(edma);
1403	} else {
1404		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
1405			 edma->m2m ? "M" : "P");
1406	}
1407
1408	return ret;
1409}
1410
1411static const struct platform_device_id ep93xx_dma_driver_ids[] = {
1412	{ "ep93xx-dma-m2p", 0 },
1413	{ "ep93xx-dma-m2m", 1 },
1414	{ },
1415};
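
/*
 * The matching platform devices come from board/SoC code. A hedged sketch
 * of the platform data this driver's probe consumes (base/IRQ values are
 * invented placeholders):
 *
 *	static struct ep93xx_dma_chan_data m2p_channels[] = {
 *		{ .name = "m2p0", .base = <ioremapped base>, .irq = <irq> },
 *	};
 *
 *	static struct ep93xx_dma_platform_data m2p_data = {
 *		.channels = m2p_channels,
 *		.num_channels = ARRAY_SIZE(m2p_channels),
 *	};
 *
 * registered under the "ep93xx-dma-m2p" (or "ep93xx-dma-m2m") name matched
 * above.
 */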
1416
1417static struct platform_driver ep93xx_dma_driver = {
1418	.driver		= {
1419		.name	= "ep93xx-dma",
1420	},
1421	.id_table	= ep93xx_dma_driver_ids,
1422};
1423
1424static int __init ep93xx_dma_module_init(void)
1425{
1426	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
1427}
1428subsys_initcall(ep93xx_dma_module_init);
1429
1430MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
1431MODULE_DESCRIPTION("EP93xx DMA driver");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Driver for the Cirrus Logic EP93xx DMA Controller
   4 *
   5 * Copyright (C) 2011 Mika Westerberg
   6 *
   7 * DMA M2P implementation is based on the original
   8 * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
   9 *
  10 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
  11 *   Copyright (C) 2006 Applied Data Systems
  12 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
  13 *
  14 * This driver is based on dw_dmac and amba-pl08x drivers.
  15 */
  16
  17#include <linux/clk.h>
  18#include <linux/init.h>
  19#include <linux/interrupt.h>
  20#include <linux/dmaengine.h>
  21#include <linux/module.h>
  22#include <linux/mod_devicetable.h>
  23#include <linux/platform_device.h>
  24#include <linux/slab.h>
  25
  26#include <linux/platform_data/dma-ep93xx.h>
  27
  28#include "dmaengine.h"
  29
  30/* M2P registers */
  31#define M2P_CONTROL			0x0000
  32#define M2P_CONTROL_STALLINT		BIT(0)
  33#define M2P_CONTROL_NFBINT		BIT(1)
  34#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
  35#define M2P_CONTROL_ENABLE		BIT(4)
  36#define M2P_CONTROL_ICE			BIT(6)
  37
  38#define M2P_INTERRUPT			0x0004
  39#define M2P_INTERRUPT_STALL		BIT(0)
  40#define M2P_INTERRUPT_NFB		BIT(1)
  41#define M2P_INTERRUPT_ERROR		BIT(3)
  42
  43#define M2P_PPALLOC			0x0008
  44#define M2P_STATUS			0x000c
  45
  46#define M2P_MAXCNT0			0x0020
  47#define M2P_BASE0			0x0024
  48#define M2P_MAXCNT1			0x0030
  49#define M2P_BASE1			0x0034
  50
  51#define M2P_STATE_IDLE			0
  52#define M2P_STATE_STALL			1
  53#define M2P_STATE_ON			2
  54#define M2P_STATE_NEXT			3
  55
  56/* M2M registers */
  57#define M2M_CONTROL			0x0000
  58#define M2M_CONTROL_DONEINT		BIT(2)
  59#define M2M_CONTROL_ENABLE		BIT(3)
  60#define M2M_CONTROL_START		BIT(4)
  61#define M2M_CONTROL_DAH			BIT(11)
  62#define M2M_CONTROL_SAH			BIT(12)
  63#define M2M_CONTROL_PW_SHIFT		9
  64#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
  65#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
  66#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
  67#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
  68#define M2M_CONTROL_TM_SHIFT		13
  69#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
  70#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
  71#define M2M_CONTROL_NFBINT		BIT(21)
  72#define M2M_CONTROL_RSS_SHIFT		22
  73#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
  74#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
  75#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
  76#define M2M_CONTROL_NO_HDSK		BIT(24)
  77#define M2M_CONTROL_PWSC_SHIFT		25
  78
  79#define M2M_INTERRUPT			0x0004
  80#define M2M_INTERRUPT_MASK		6
  81
  82#define M2M_STATUS			0x000c
  83#define M2M_STATUS_CTL_SHIFT		1
  84#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
  85#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
  86#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
  87#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
  88#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
  89#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
  90#define M2M_STATUS_BUF_SHIFT		4
  91#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
  92#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
  93#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
  94#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
  95#define M2M_STATUS_DONE			BIT(6)
  96
  97#define M2M_BCR0			0x0010
  98#define M2M_BCR1			0x0014
  99#define M2M_SAR_BASE0			0x0018
 100#define M2M_SAR_BASE1			0x001c
 101#define M2M_DAR_BASE0			0x002c
 102#define M2M_DAR_BASE1			0x0030
 103
 104#define DMA_MAX_CHAN_BYTES		0xffff
 105#define DMA_MAX_CHAN_DESCRIPTORS	32
 106
 107struct ep93xx_dma_engine;
 108static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
 109					 enum dma_transfer_direction dir,
 110					 struct dma_slave_config *config);
 111
 112/**
 113 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 114 * @src_addr: source address of the transaction
 115 * @dst_addr: destination address of the transaction
 116 * @size: size of the transaction (in bytes)
 117 * @complete: this descriptor is completed
 118 * @txd: dmaengine API descriptor
 119 * @tx_list: list of linked descriptors
 120 * @node: link used for putting this into a channel queue
 121 */
 122struct ep93xx_dma_desc {
 123	u32				src_addr;
 124	u32				dst_addr;
 125	size_t				size;
 126	bool				complete;
 127	struct dma_async_tx_descriptor	txd;
 128	struct list_head		tx_list;
 129	struct list_head		node;
 130};
 131
 132/**
 133 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 134 * @chan: dmaengine API channel
 135 * @edma: pointer to to the engine device
 136 * @regs: memory mapped registers
 137 * @irq: interrupt number of the channel
 138 * @clk: clock used by this channel
 139 * @tasklet: channel specific tasklet used for callbacks
 140 * @lock: lock protecting the fields following
 141 * @flags: flags for the channel
 142 * @buffer: which buffer to use next (0/1)
 143 * @active: flattened chain of descriptors currently being processed
 144 * @queue: pending descriptors which are handled next
 145 * @free_list: list of free descriptors which can be used
 146 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 147 *                is set via .device_config before slave operation is
 148 *                prepared
 149 * @runtime_ctrl: M2M runtime values for the control register.
 
 150 *
 151 * As EP93xx DMA controller doesn't support real chained DMA descriptors we
 152 * will have slightly different scheme here: @active points to a head of
 153 * flattened DMA descriptor chain.
 154 *
 155 * @queue holds pending transactions. These are linked through the first
 156 * descriptor in the chain. When a descriptor is moved to the @active queue,
 157 * the first and chained descriptors are flattened into a single list.
 158 *
 159 * @chan.private holds pointer to &struct ep93xx_dma_data which contains
 160 * necessary channel configuration information. For memcpy channels this must
 161 * be %NULL.
 162 */
 163struct ep93xx_dma_chan {
 164	struct dma_chan			chan;
 165	const struct ep93xx_dma_engine	*edma;
 166	void __iomem			*regs;
 167	int				irq;
 168	struct clk			*clk;
 169	struct tasklet_struct		tasklet;
 170	/* protects the fields following */
 171	spinlock_t			lock;
 172	unsigned long			flags;
 173/* Channel is configured for cyclic transfers */
 174#define EP93XX_DMA_IS_CYCLIC		0
 175
 176	int				buffer;
 177	struct list_head		active;
 178	struct list_head		queue;
 179	struct list_head		free_list;
 180	u32				runtime_addr;
 181	u32				runtime_ctrl;
 182	struct dma_slave_config		slave_config;
 183};
 184
 185/**
 186 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 187 * @dma_dev: holds the dmaengine device
 188 * @m2m: is this an M2M or M2P device
 189 * @hw_setup: method which sets the channel up for operation
 
 190 * @hw_shutdown: shuts the channel down and flushes whatever is left
 191 * @hw_submit: pushes active descriptor(s) to the hardware
 192 * @hw_interrupt: handle the interrupt
 193 * @num_channels: number of channels for this instance
 194 * @channels: array of channels
 195 *
 196 * There is one instance of this struct for the M2P channels and one for the
 197 * M2M channels. hw_xxx() methods are used to perform operations which are
 198 * different on M2M and M2P channels. These methods are called with channel
 199 * lock held and interrupts disabled so they cannot sleep.
 200 */
 201struct ep93xx_dma_engine {
 202	struct dma_device	dma_dev;
 203	bool			m2m;
 204	int			(*hw_setup)(struct ep93xx_dma_chan *);
 205	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
 206	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
 207	void			(*hw_submit)(struct ep93xx_dma_chan *);
 208	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
 209#define INTERRUPT_UNKNOWN	0
 210#define INTERRUPT_DONE		1
 211#define INTERRUPT_NEXT_BUFFER	2
 212
 213	size_t			num_channels;
 214	struct ep93xx_dma_chan	channels[];
 215};
 216
 217static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
 218{
 219	return &edmac->chan.dev->device;
 220}
 221
 222static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
 223{
 224	return container_of(chan, struct ep93xx_dma_chan, chan);
 225}
 226
 227/**
 228 * ep93xx_dma_set_active - set new active descriptor chain
 229 * @edmac: channel
 230 * @desc: head of the new active descriptor chain
 231 *
 232 * Sets @desc to be the head of the new active descriptor chain. This is the
 233 * chain which is processed next. The active list must be empty before calling
 234 * this function.
 235 *
 236 * Called with @edmac->lock held and interrupts disabled.
 237 */
 238static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
 239				  struct ep93xx_dma_desc *desc)
 240{
 241	BUG_ON(!list_empty(&edmac->active));
 242
 243	list_add_tail(&desc->node, &edmac->active);
 244
 245	/* Flatten the @desc->tx_list chain into @edmac->active list */
 246	while (!list_empty(&desc->tx_list)) {
 247		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
 248			struct ep93xx_dma_desc, node);
 249
 250		/*
 251		 * We copy the callback parameters from the first descriptor
 252		 * to all the chained descriptors. This way we can call the
 253		 * callback without having to find out the first descriptor in
 254		 * the chain. Useful for cyclic transfers.
 255		 */
 256		d->txd.callback = desc->txd.callback;
 257		d->txd.callback_param = desc->txd.callback_param;
 258
 259		list_move_tail(&d->node, &edmac->active);
 260	}
 261}
 262
 263/* Called with @edmac->lock held and interrupts disabled */
 264static struct ep93xx_dma_desc *
 265ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 266{
 267	return list_first_entry_or_null(&edmac->active,
 268					struct ep93xx_dma_desc, node);
 269}
 270
 271/**
 272 * ep93xx_dma_advance_active - advances to the next active descriptor
 273 * @edmac: channel
 274 *
 275 * Function advances active descriptor to the next in the @edmac->active and
 276 * returns %true if we still have descriptors in the chain to process.
 277 * Otherwise returns %false.
 278 *
 279 * When the channel is in cyclic mode always returns %true.
 280 *
 281 * Called with @edmac->lock held and interrupts disabled.
 282 */
 283static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
 284{
 285	struct ep93xx_dma_desc *desc;
 286
 287	list_rotate_left(&edmac->active);
 288
 289	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 290		return true;
 291
 292	desc = ep93xx_dma_get_active(edmac);
 293	if (!desc)
 294		return false;
 295
 296	/*
 297	 * If txd.cookie is set it means that we are back in the first
 298	 * descriptor in the chain and hence done with it.
 299	 */
 300	return !desc->txd.cookie;
 301}
 302
 303/*
 304 * M2P DMA implementation
 305 */
 306
 307static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
 308{
 309	writel(control, edmac->regs + M2P_CONTROL);
 310	/*
 311	 * EP93xx User's Guide states that we must perform a dummy read after
 312	 * write to the control register.
 313	 */
 314	readl(edmac->regs + M2P_CONTROL);
 315}
 316
 317static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
 318{
 319	struct ep93xx_dma_data *data = edmac->chan.private;
 320	u32 control;
 321
 322	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
 323
 324	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
 325		| M2P_CONTROL_ENABLE;
 326	m2p_set_control(edmac, control);
 327
 328	edmac->buffer = 0;
 329
 330	return 0;
 331}
 332
 333static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
 334{
 335	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 336}
 337
 338static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 339{
 340	unsigned long flags;
 341	u32 control;
 342
 343	spin_lock_irqsave(&edmac->lock, flags);
 344	control = readl(edmac->regs + M2P_CONTROL);
 345	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 346	m2p_set_control(edmac, control);
 347	spin_unlock_irqrestore(&edmac->lock, flags);
 348
 349	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
 350		schedule();
 351}
 352
 353static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
 354{
 355	m2p_set_control(edmac, 0);
 356
 357	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
 358		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 359}
 360
 361static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
 362{
 363	struct ep93xx_dma_desc *desc;
 364	u32 bus_addr;
 365
 366	desc = ep93xx_dma_get_active(edmac);
 367	if (!desc) {
 368		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
 369		return;
 370	}
 371
 372	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
 373		bus_addr = desc->src_addr;
 374	else
 375		bus_addr = desc->dst_addr;
 376
 377	if (edmac->buffer == 0) {
 378		writel(desc->size, edmac->regs + M2P_MAXCNT0);
 379		writel(bus_addr, edmac->regs + M2P_BASE0);
 380	} else {
 381		writel(desc->size, edmac->regs + M2P_MAXCNT1);
 382		writel(bus_addr, edmac->regs + M2P_BASE1);
 383	}
 384
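	/* Toggle between the channel's two hardware buffer register sets */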
 385	edmac->buffer ^= 1;
 386}
 387
 388static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
 389{
 390	u32 control = readl(edmac->regs + M2P_CONTROL);
 391
 392	m2p_fill_desc(edmac);
 393	control |= M2P_CONTROL_STALLINT;
 394
 395	if (ep93xx_dma_advance_active(edmac)) {
 396		m2p_fill_desc(edmac);
 397		control |= M2P_CONTROL_NFBINT;
 398	}
 399
 400	m2p_set_control(edmac, control);
 401}
 402
 403static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
 404{
 405	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
 406	u32 control;
 407
 408	if (irq_status & M2P_INTERRUPT_ERROR) {
 409		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
 410
 411		/* Clear the error interrupt */
 412		writel(1, edmac->regs + M2P_INTERRUPT);
 413
 414		/*
 415		 * It seems that there is no easy way of reporting errors back
 416		 * to the client, so we just report the error here and continue
 417		 * as usual.
 418		 *
 419		 * Revisit this when there is a mechanism to report back the
 420		 * errors.
 421		 */
 422		dev_err(chan2dev(edmac),
 423			"DMA transfer failed! Details:\n"
 424			"\tcookie	: %d\n"
 425			"\tsrc_addr	: 0x%08x\n"
 426			"\tdst_addr	: 0x%08x\n"
 427			"\tsize		: %zu\n",
 428			desc->txd.cookie, desc->src_addr, desc->dst_addr,
 429			desc->size);
 430	}
 431
 432	/*
 433	 * Even the latest E2 silicon revision sometimes asserts the STALL
 434	 * interrupt instead of NFB. Therefore we treat them equally, deciding
 435	 * based on the amount of data we still have to transfer.
 436	 */
 437	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
 438		return INTERRUPT_UNKNOWN;
 439
 440	if (ep93xx_dma_advance_active(edmac)) {
 441		m2p_fill_desc(edmac);
 442		return INTERRUPT_NEXT_BUFFER;
 443	}
 444
 445	/* Disable interrupts */
 446	control = readl(edmac->regs + M2P_CONTROL);
 447	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 448	m2p_set_control(edmac, control);
 449
 450	return INTERRUPT_DONE;
 451}
 452
 453/*
 454 * M2M DMA implementation
 455 */
 456
 457static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
 458{
 459	const struct ep93xx_dma_data *data = edmac->chan.private;
 460	u32 control = 0;
 461
 462	if (!data) {
 463		/* This is a memcpy channel, nothing to configure */
 464		writel(control, edmac->regs + M2M_CONTROL);
 465		return 0;
 466	}
 467
 468	switch (data->port) {
 469	case EP93XX_DMA_SSP:
 470		/*
 471		 * This was found experimentally - anything less than 5
 472		 * causes the channel to perform only a partial transfer, which
 473		 * leads to problems since we then don't get the DONE interrupt.
 474		 */
 475		control = (5 << M2M_CONTROL_PWSC_SHIFT);
 476		control |= M2M_CONTROL_NO_HDSK;
 477
 478		if (data->direction == DMA_MEM_TO_DEV) {
 479			control |= M2M_CONTROL_DAH;
 480			control |= M2M_CONTROL_TM_TX;
 481			control |= M2M_CONTROL_RSS_SSPTX;
 482		} else {
 483			control |= M2M_CONTROL_SAH;
 484			control |= M2M_CONTROL_TM_RX;
 485			control |= M2M_CONTROL_RSS_SSPRX;
 486		}
 487		break;
 488
 489	case EP93XX_DMA_IDE:
 490		/*
 491		 * This IDE part is totally untested. The values below are taken
 492		 * from the EP93xx User's Guide and might not be correct.
 493		 */
 494		if (data->direction == DMA_MEM_TO_DEV) {
 495			/* Worst case from the UG */
 496			control = (3 << M2M_CONTROL_PWSC_SHIFT);
 497			control |= M2M_CONTROL_DAH;
 498			control |= M2M_CONTROL_TM_TX;
 499		} else {
 500			control = (2 << M2M_CONTROL_PWSC_SHIFT);
 501			control |= M2M_CONTROL_SAH;
 502			control |= M2M_CONTROL_TM_RX;
 503		}
 504
 505		control |= M2M_CONTROL_NO_HDSK;
 506		control |= M2M_CONTROL_RSS_IDE;
 507		control |= M2M_CONTROL_PW_16;
 508		break;
 509
 510	default:
 511		return -EINVAL;
 512	}
 513
 514	writel(control, edmac->regs + M2M_CONTROL);
 515	return 0;
 516}
 517
 518static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
 519{
 520	/* Just disable the channel */
 521	writel(0, edmac->regs + M2M_CONTROL);
 522}
 523
 524static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
 525{
 526	struct ep93xx_dma_desc *desc;
 527
 528	desc = ep93xx_dma_get_active(edmac);
 529	if (!desc) {
 530		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
 531		return;
 532	}
 533
 534	if (edmac->buffer == 0) {
 535		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
 536		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
 537		writel(desc->size, edmac->regs + M2M_BCR0);
 538	} else {
 539		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
 540		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
 541		writel(desc->size, edmac->regs + M2M_BCR1);
 542	}
 543
 544	edmac->buffer ^= 1;
 545}
 546
 547static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
 548{
 549	struct ep93xx_dma_data *data = edmac->chan.private;
 550	u32 control = readl(edmac->regs + M2M_CONTROL);
 551
 552	/*
 553	 * Since we allow clients to configure PW (peripheral width) we always
 554	 * clear the PW bits here and then set them according to what is given
 555	 * in the runtime configuration.
 556	 */
 557	control &= ~M2M_CONTROL_PW_MASK;
 558	control |= edmac->runtime_ctrl;
 559
 560	m2m_fill_desc(edmac);
 561	control |= M2M_CONTROL_DONEINT;
 562
 563	if (ep93xx_dma_advance_active(edmac)) {
 564		m2m_fill_desc(edmac);
 565		control |= M2M_CONTROL_NFBINT;
 566	}
 567
 568	/*
 569	 * Now we can finally enable the channel. For M2M channel this must be
 570	 * done _after_ the BCRx registers are programmed.
 571	 */
 572	control |= M2M_CONTROL_ENABLE;
 573	writel(control, edmac->regs + M2M_CONTROL);
 574
 575	if (!data) {
 576		/*
 577		 * For memcpy channels the software trigger must be asserted
 578		 * in order to start the memcpy operation.
 579		 */
 580		control |= M2M_CONTROL_START;
 581		writel(control, edmac->regs + M2M_CONTROL);
 582	}
 583}
 584
 585/*
 586 * According to the EP93xx User's Guide, we should receive the DONE interrupt
 587 * when all M2M DMA controller transactions complete normally. This is not
 588 * always the case - sometimes the EP93xx M2M DMA asserts the DONE interrupt
 589 * while the DMA channel is still running (channel Buffer FSM in DMA_BUF_ON
 590 * state and channel Control FSM in DMA_MEM_RD state, observed at least in
 591 * IDE-DMA operation). In effect, disabling the channel when only the DONE
 592 * bit is set could stop a currently running DMA transfer. To avoid this, we
 593 * use the Buffer FSM and Control FSM to check the channel's current state.
 594 */
 595static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
 596{
 597	u32 status = readl(edmac->regs + M2M_STATUS);
 598	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
 599	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
 600	bool done = status & M2M_STATUS_DONE;
 601	bool last_done;
 602	u32 control;
 603	struct ep93xx_dma_desc *desc;
 604
 605	/* Accept only DONE and NFB interrupts */
 606	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
 607		return INTERRUPT_UNKNOWN;
 608
 609	if (done) {
 610		/* Clear the DONE bit */
 611		writel(0, edmac->regs + M2M_INTERRUPT);
 612	}
 613
 614	/*
 615	 * Check whether we are done with descriptors or not. This, together
 616	 * with the DMA channel state, determines the action to take here.
 617	 */
 618	desc = ep93xx_dma_get_active(edmac);
 619	last_done = !desc || desc->txd.cookie;
 620
 621	/*
 622	 * Use the M2M DMA Buffer FSM and Control FSM to check the current
 623	 * state of the DMA channel. Using the DONE and NFB bits from the
 624	 * channel status or interrupt registers is not reliable.
 625	 */
 626	if (!last_done &&
 627	    (buf_fsm == M2M_STATUS_BUF_NO ||
 628	     buf_fsm == M2M_STATUS_BUF_ON)) {
 629		/*
 630		 * Two buffers are ready for update when Buffer FSM is in
 631		 * DMA_NO_BUF state. Only one buffer can be prepared without
 632		 * disabling the channel or polling the DONE bit.
 633		 * To simplify things, always prepare only one buffer.
 634		 */
 635		if (ep93xx_dma_advance_active(edmac)) {
 636			m2m_fill_desc(edmac);
 637			if (done && !edmac->chan.private) {
 638				/* Software trigger for memcpy channel */
 639				control = readl(edmac->regs + M2M_CONTROL);
 640				control |= M2M_CONTROL_START;
 641				writel(control, edmac->regs + M2M_CONTROL);
 642			}
 643			return INTERRUPT_NEXT_BUFFER;
 644		} else {
 645			last_done = true;
 646		}
 647	}
 648
 649	/*
 650	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
 651	 * and Control FSM is in DMA_STALL state.
 652	 */
 653	if (last_done &&
 654	    buf_fsm == M2M_STATUS_BUF_NO &&
 655	    ctl_fsm == M2M_STATUS_CTL_STALL) {
 656		/* Disable interrupts and the channel */
 657		control = readl(edmac->regs + M2M_CONTROL);
 658		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
 659			    | M2M_CONTROL_ENABLE);
 660		writel(control, edmac->regs + M2M_CONTROL);
 661		return INTERRUPT_DONE;
 662	}
 663
 664	/*
 665	 * Nothing to do this time.
 666	 */
 667	return INTERRUPT_NEXT_BUFFER;
 668}
 669
 670/*
 671 * DMA engine API implementation
 672 */
 673
 674static struct ep93xx_dma_desc *
 675ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
 676{
 677	struct ep93xx_dma_desc *desc, *_desc;
 678	struct ep93xx_dma_desc *ret = NULL;
 679	unsigned long flags;
 680
 681	spin_lock_irqsave(&edmac->lock, flags);
 682	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
 683		if (async_tx_test_ack(&desc->txd)) {
 684			list_del_init(&desc->node);
 685
 686			/* Re-initialize the descriptor */
 687			desc->src_addr = 0;
 688			desc->dst_addr = 0;
 689			desc->size = 0;
 690			desc->complete = false;
 691			desc->txd.cookie = 0;
 692			desc->txd.callback = NULL;
 693			desc->txd.callback_param = NULL;
 694
 695			ret = desc;
 696			break;
 697		}
 698	}
 699	spin_unlock_irqrestore(&edmac->lock, flags);
 700	return ret;
 701}
 702
 703static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
 704				struct ep93xx_dma_desc *desc)
 705{
 706	if (desc) {
 707		unsigned long flags;
 708
 709		spin_lock_irqsave(&edmac->lock, flags);
 710		list_splice_init(&desc->tx_list, &edmac->free_list);
 711		list_add(&desc->node, &edmac->free_list);
 712		spin_unlock_irqrestore(&edmac->lock, flags);
 713	}
 714}
 715
 716/**
 717 * ep93xx_dma_advance_work - start processing the next pending transaction
 718 * @edmac: channel
 719 *
 720 * If we have pending transactions queued and we are currently idling, this
 721 * function takes the next queued transaction from the @edmac->queue and
 722 * pushes it to the hardware for execution.
 723 */
 724static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
 725{
 726	struct ep93xx_dma_desc *new;
 727	unsigned long flags;
 728
 729	spin_lock_irqsave(&edmac->lock, flags);
 730	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
 731		spin_unlock_irqrestore(&edmac->lock, flags);
 732		return;
 733	}
 734
 735	/* Take the next descriptor from the pending queue */
 736	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
 737	list_del_init(&new->node);
 738
 739	ep93xx_dma_set_active(edmac, new);
 740
 741	/* Push it to the hardware */
 742	edmac->edma->hw_submit(edmac);
 743	spin_unlock_irqrestore(&edmac->lock, flags);
 744}
 745
 746static void ep93xx_dma_tasklet(unsigned long data)
 747{
 748	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
 749	struct ep93xx_dma_desc *desc, *d;
 750	struct dmaengine_desc_callback cb;
 751	LIST_HEAD(list);
 752
 753	memset(&cb, 0, sizeof(cb));
 754	spin_lock_irq(&edmac->lock);
 755	/*
 756	 * If dma_terminate_all() was called before we get to run, the active
 757	 * list has become empty. If that happens we aren't supposed to do
 758	 * anything more than call ep93xx_dma_advance_work().
 759	 */
 760	desc = ep93xx_dma_get_active(edmac);
 761	if (desc) {
 762		if (desc->complete) {
 763			/* mark descriptor complete for non cyclic case only */
 764			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 765				dma_cookie_complete(&desc->txd);
 766			list_splice_init(&edmac->active, &list);
 767		}
 768		dmaengine_desc_get_callback(&desc->txd, &cb);
 769	}
 770	spin_unlock_irq(&edmac->lock);
 771
 772	/* Pick up the next descriptor from the queue */
 773	ep93xx_dma_advance_work(edmac);
 774
 775	/* Now we can release all the chained descriptors */
 776	list_for_each_entry_safe(desc, d, &list, node) {
 777		dma_descriptor_unmap(&desc->txd);
 778		ep93xx_dma_desc_put(edmac, desc);
 779	}
 780
 781	dmaengine_desc_callback_invoke(&cb, NULL);
 782}
 783
 784static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
 785{
 786	struct ep93xx_dma_chan *edmac = dev_id;
 787	struct ep93xx_dma_desc *desc;
 788	irqreturn_t ret = IRQ_HANDLED;
 789
 790	spin_lock(&edmac->lock);
 791
 792	desc = ep93xx_dma_get_active(edmac);
 793	if (!desc) {
 794		dev_warn(chan2dev(edmac),
 795			 "got interrupt while active list is empty\n");
 796		spin_unlock(&edmac->lock);
 797		return IRQ_NONE;
 798	}
 799
 800	switch (edmac->edma->hw_interrupt(edmac)) {
 801	case INTERRUPT_DONE:
 802		desc->complete = true;
 803		tasklet_schedule(&edmac->tasklet);
 804		break;
 805
 806	case INTERRUPT_NEXT_BUFFER:
 807		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 808			tasklet_schedule(&edmac->tasklet);
 809		break;
 810
 811	default:
 812		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
 813		ret = IRQ_NONE;
 814		break;
 815	}
 816
 817	spin_unlock(&edmac->lock);
 818	return ret;
 819}
 820
 821/**
 822 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 823 * @tx: descriptor to be executed
 824 *
 825 * Executes the given descriptor on the hardware or, if the hardware is
 826 * busy, queues the descriptor to be executed later on. Returns a cookie
 827 * which can be used to poll the status of the descriptor.
 828 */
 829static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 830{
 831	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
 832	struct ep93xx_dma_desc *desc;
 833	dma_cookie_t cookie;
 834	unsigned long flags;
 835
 836	spin_lock_irqsave(&edmac->lock, flags);
 837	cookie = dma_cookie_assign(tx);
 838
 839	desc = container_of(tx, struct ep93xx_dma_desc, txd);
 840
 841	/*
 842	 * If nothing is currently being processed, we push this descriptor
 843	 * directly to the hardware. Otherwise we add the descriptor
 844	 * to the pending queue.
 845	 */
 846	if (list_empty(&edmac->active)) {
 847		ep93xx_dma_set_active(edmac, desc);
 848		edmac->edma->hw_submit(edmac);
 849	} else {
 850		list_add_tail(&desc->node, &edmac->queue);
 851	}
 852
 853	spin_unlock_irqrestore(&edmac->lock, flags);
 854	return cookie;
 855}
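/*
 * A minimal client-side sketch (not part of this driver; error handling
 * mostly omitted): the cookie returned above is obtained through the
 * generic dmaengine helpers and the queue is then kicked explicitly:
 *
 *	cookie = dmaengine_submit(txd);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);
 */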
 856
 857/**
 858 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 859 * @chan: channel to allocate resources
 860 *
 861 * Allocates the necessary resources for the given DMA channel and
 862 * returns the number of allocated descriptors. A negative errno
 863 * is returned in case of failure.
 864 */
 865static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
 866{
 867	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 868	struct ep93xx_dma_data *data = chan->private;
 869	const char *name = dma_chan_name(chan);
 870	int ret, i;
 871
 872	/* Sanity check the channel parameters */
 873	if (!edmac->edma->m2m) {
 874		if (!data)
 875			return -EINVAL;
 876		if (data->port < EP93XX_DMA_I2S1 ||
 877		    data->port > EP93XX_DMA_IRDA)
 878			return -EINVAL;
 879		if (data->direction != ep93xx_dma_chan_direction(chan))
 880			return -EINVAL;
 881	} else {
 882		if (data) {
 883			switch (data->port) {
 884			case EP93XX_DMA_SSP:
 885			case EP93XX_DMA_IDE:
 886				if (!is_slave_direction(data->direction))
 887					return -EINVAL;
 888				break;
 889			default:
 890				return -EINVAL;
 891			}
 892		}
 893	}
 894
 895	if (data && data->name)
 896		name = data->name;
 897
 898	ret = clk_enable(edmac->clk);
 899	if (ret)
 900		return ret;
 901
 902	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
 903	if (ret)
 904		goto fail_clk_disable;
 905
 906	spin_lock_irq(&edmac->lock);
 907	dma_cookie_init(&edmac->chan);
 908	ret = edmac->edma->hw_setup(edmac);
 909	spin_unlock_irq(&edmac->lock);
 910
 911	if (ret)
 912		goto fail_free_irq;
 913
 914	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
 915		struct ep93xx_dma_desc *desc;
 916
 917		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 918		if (!desc) {
 919			dev_warn(chan2dev(edmac), "not enough descriptors\n");
 920			break;
 921		}
 922
 923		INIT_LIST_HEAD(&desc->tx_list);
 924
 925		dma_async_tx_descriptor_init(&desc->txd, chan);
 926		desc->txd.flags = DMA_CTRL_ACK;
 927		desc->txd.tx_submit = ep93xx_dma_tx_submit;
 928
 929		ep93xx_dma_desc_put(edmac, desc);
 930	}
 931
 932	return i;
 933
 934fail_free_irq:
 935	free_irq(edmac->irq, edmac);
 936fail_clk_disable:
 937	clk_disable(edmac->clk);
 938
 939	return ret;
 940}
 941
 942/**
 943 * ep93xx_dma_free_chan_resources - release resources for the channel
 944 * @chan: channel
 945 *
 946 * Releases all the resources allocated for the given channel.
 947 * The channel must be idle when this is called.
 948 */
 949static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
 950{
 951	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 952	struct ep93xx_dma_desc *desc, *d;
 953	unsigned long flags;
 954	LIST_HEAD(list);
 955
 956	BUG_ON(!list_empty(&edmac->active));
 957	BUG_ON(!list_empty(&edmac->queue));
 958
 959	spin_lock_irqsave(&edmac->lock, flags);
 960	edmac->edma->hw_shutdown(edmac);
 961	edmac->runtime_addr = 0;
 962	edmac->runtime_ctrl = 0;
 963	edmac->buffer = 0;
 964	list_splice_init(&edmac->free_list, &list);
 965	spin_unlock_irqrestore(&edmac->lock, flags);
 966
 967	list_for_each_entry_safe(desc, d, &list, node)
 968		kfree(desc);
 969
 970	clk_disable(edmac->clk);
 971	free_irq(edmac->irq, edmac);
 972}
 973
 974/**
 975 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 976 * @chan: channel
 977 * @dest: destination bus address
 978 * @src: source bus address
 979 * @len: size of the transaction
 980 * @flags: flags for the descriptor
 981 *
 982 * Returns a valid DMA descriptor or %NULL in case of failure.
 983 */
 984static struct dma_async_tx_descriptor *
 985ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 986			   dma_addr_t src, size_t len, unsigned long flags)
 987{
 988	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 989	struct ep93xx_dma_desc *desc, *first;
 990	size_t bytes, offset;
 991
 992	first = NULL;
 993	for (offset = 0; offset < len; offset += bytes) {
 994		desc = ep93xx_dma_desc_get(edmac);
 995		if (!desc) {
 996			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
 997			goto fail;
 998		}
 999
1000		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
1001
1002		desc->src_addr = src + offset;
1003		desc->dst_addr = dest + offset;
1004		desc->size = bytes;
1005
1006		if (!first)
1007			first = desc;
1008		else
1009			list_add_tail(&desc->node, &first->tx_list);
1010	}
1011
1012	first->txd.cookie = -EBUSY;
1013	first->txd.flags = flags;
1014
1015	return &first->txd;
1016fail:
1017	ep93xx_dma_desc_put(edmac, first);
1018	return NULL;
1019}
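/*
 * A minimal memcpy client sketch (assumptions: @chan was obtained with the
 * DMA_MEMCPY capability set, and dst/src/len are valid DMA addresses):
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */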
1020
1021/**
1022 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
1023 * @chan: channel
1024 * @sgl: list of buffers to transfer
1025 * @sg_len: number of entries in @sgl
1026 * @dir: direction of the DMA transfer
1027 * @flags: flags for the descriptor
1028 * @context: operation context (ignored)
1029 *
1030 * Returns a valid DMA descriptor or %NULL in case of failure.
1031 */
1032static struct dma_async_tx_descriptor *
1033ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1034			 unsigned int sg_len, enum dma_transfer_direction dir,
1035			 unsigned long flags, void *context)
1036{
1037	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1038	struct ep93xx_dma_desc *desc, *first;
1039	struct scatterlist *sg;
1040	int i;
1041
1042	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1043		dev_warn(chan2dev(edmac),
1044			 "channel was configured with different direction\n");
1045		return NULL;
1046	}
1047
1048	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1049		dev_warn(chan2dev(edmac),
1050			 "channel is already used for cyclic transfers\n");
1051		return NULL;
1052	}
1053
1054	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
1055
1056	first = NULL;
1057	for_each_sg(sgl, sg, sg_len, i) {
1058		size_t len = sg_dma_len(sg);
1059
1060		if (len > DMA_MAX_CHAN_BYTES) {
1061			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
1062				 len);
1063			goto fail;
1064		}
1065
1066		desc = ep93xx_dma_desc_get(edmac);
1067		if (!desc) {
1068			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1069			goto fail;
1070		}
1071
1072		if (dir == DMA_MEM_TO_DEV) {
1073			desc->src_addr = sg_dma_address(sg);
1074			desc->dst_addr = edmac->runtime_addr;
1075		} else {
1076			desc->src_addr = edmac->runtime_addr;
1077			desc->dst_addr = sg_dma_address(sg);
1078		}
1079		desc->size = len;
1080
1081		if (!first)
1082			first = desc;
1083		else
1084			list_add_tail(&desc->node, &first->tx_list);
1085	}
1086
1087	first->txd.cookie = -EBUSY;
1088	first->txd.flags = flags;
1089
1090	return &first->txd;
1091
1092fail:
1093	ep93xx_dma_desc_put(edmac, first);
1094	return NULL;
1095}
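/*
 * A minimal slave sg client sketch (assumptions: @sgl and @nents come from
 * a successful dma_map_sg() and the channel was configured with
 * dmaengine_slave_config() beforehand):
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 */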
1096
1097/**
1098 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
1099 * @chan: channel
1100 * @dma_addr: DMA mapped address of the buffer
1101 * @buf_len: length of the buffer (in bytes)
1102 * @period_len: length of a single period
1103 * @dir: direction of the operation
1104 * @flags: tx descriptor status flags
1105 *
1106 * Prepares a descriptor for a cyclic DMA operation. This means that once the
1107 * descriptor is submitted, we will be submitting @period_len sized
1108 * buffers and calling the callback once each period has elapsed. The transfer
1109 * terminates only when the client calls dmaengine_terminate_all() for this
1110 * channel.
1111 *
1112 * Returns a valid DMA descriptor or %NULL in case of failure.
1113 */
1114static struct dma_async_tx_descriptor *
1115ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1116			   size_t buf_len, size_t period_len,
1117			   enum dma_transfer_direction dir, unsigned long flags)
1118{
1119	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1120	struct ep93xx_dma_desc *desc, *first;
1121	size_t offset = 0;
1122
1123	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1124		dev_warn(chan2dev(edmac),
1125			 "channel was configured with different direction\n");
1126		return NULL;
1127	}
1128
1129	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1130		dev_warn(chan2dev(edmac),
1131			 "channel is already used for cyclic transfers\n");
1132		return NULL;
1133	}
1134
1135	if (period_len > DMA_MAX_CHAN_BYTES) {
1136		dev_warn(chan2dev(edmac), "too big period length %zu\n",
1137			 period_len);
1138		return NULL;
1139	}
1140
1141	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
1142
1143	/* Split the buffer into period size chunks */
1144	first = NULL;
1145	for (offset = 0; offset < buf_len; offset += period_len) {
1146		desc = ep93xx_dma_desc_get(edmac);
1147		if (!desc) {
1148			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1149			goto fail;
1150		}
1151
1152		if (dir == DMA_MEM_TO_DEV) {
1153			desc->src_addr = dma_addr + offset;
1154			desc->dst_addr = edmac->runtime_addr;
1155		} else {
1156			desc->src_addr = edmac->runtime_addr;
1157			desc->dst_addr = dma_addr + offset;
1158		}
1159
1160		desc->size = period_len;
1161
1162		if (!first)
1163			first = desc;
1164		else
1165			list_add_tail(&desc->node, &first->tx_list);
1166	}
1167
1168	first->txd.cookie = -EBUSY;
1169
1170	return &first->txd;
1171
1172fail:
1173	ep93xx_dma_desc_put(edmac, first);
1174	return NULL;
1175}
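/*
 * A minimal cyclic client sketch, audio-style (assumptions: buf_dma,
 * buf_len and period_len describe a DMA-mapped ring buffer;
 * period_elapsed() and substream are hypothetical client-side names):
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = period_elapsed;
 *	txd->callback_param = substream;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */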
1176
1177/**
1178 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
1179 * current context.
1180 * @chan: channel
1181 *
1182 * Synchronizes the DMA channel termination to the current context. When this
1183 * function returns it is guaranteed that all transfers for previously issued
1184 * descriptors have stopped and it is safe to free the memory associated
1185 * with them. Furthermore it is guaranteed that all complete callback functions
1186 * for a previously submitted descriptor have finished running and it is safe to
1187 * free resources accessed from within the complete callbacks.
1188 */
1189static void ep93xx_dma_synchronize(struct dma_chan *chan)
1190{
1191	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1192
1193	if (edmac->edma->hw_synchronize)
1194		edmac->edma->hw_synchronize(edmac);
1195}
1196
1197/**
1198 * ep93xx_dma_terminate_all - terminate all transactions
1199 * @chan: channel
1200 *
1201 * Stops all DMA transactions. All descriptors are put back to the
1202 * @edmac->free_list and callbacks are _not_ called.
1203 */
1204static int ep93xx_dma_terminate_all(struct dma_chan *chan)
1205{
1206	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1207	struct ep93xx_dma_desc *desc, *_d;
1208	unsigned long flags;
1209	LIST_HEAD(list);
1210
1211	spin_lock_irqsave(&edmac->lock, flags);
1212	/* First we disable and flush the DMA channel */
1213	edmac->edma->hw_shutdown(edmac);
1214	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
1215	list_splice_init(&edmac->active, &list);
1216	list_splice_init(&edmac->queue, &list);
1217	/*
1218	 * We then re-enable the channel. This way we can continue submitting
1219	 * the descriptors by just calling ->hw_submit() again.
1220	 */
1221	edmac->edma->hw_setup(edmac);
1222	spin_unlock_irqrestore(&edmac->lock, flags);
1223
1224	list_for_each_entry_safe(desc, _d, &list, node)
1225		ep93xx_dma_desc_put(edmac, desc);
1226
1227	return 0;
1228}
1229
1230static int ep93xx_dma_slave_config(struct dma_chan *chan,
1231				   struct dma_slave_config *config)
1232{
1233	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1234
1235	memcpy(&edmac->slave_config, config, sizeof(*config));
1236
1237	return 0;
1238}
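/*
 * A minimal configuration sketch (SSP1_TX_FIFO_DMA_ADDR is a hypothetical
 * FIFO bus address); only the address and bus width for the transfer
 * direction are later consumed by ep93xx_dma_slave_config_write() below:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= SSP1_TX_FIFO_DMA_ADDR,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */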
1239
1240static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
1241					 enum dma_transfer_direction dir,
1242					 struct dma_slave_config *config)
1243{
1244	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1245	enum dma_slave_buswidth width;
1246	unsigned long flags;
1247	u32 addr, ctrl;
1248
1249	if (!edmac->edma->m2m)
1250		return -EINVAL;
1251
1252	switch (dir) {
1253	case DMA_DEV_TO_MEM:
1254		width = config->src_addr_width;
1255		addr = config->src_addr;
1256		break;
1257
1258	case DMA_MEM_TO_DEV:
1259		width = config->dst_addr_width;
1260		addr = config->dst_addr;
1261		break;
1262
1263	default:
1264		return -EINVAL;
1265	}
1266
1267	switch (width) {
1268	case DMA_SLAVE_BUSWIDTH_1_BYTE:
1269		ctrl = 0;
1270		break;
1271	case DMA_SLAVE_BUSWIDTH_2_BYTES:
1272		ctrl = M2M_CONTROL_PW_16;
1273		break;
1274	case DMA_SLAVE_BUSWIDTH_4_BYTES:
1275		ctrl = M2M_CONTROL_PW_32;
1276		break;
1277	default:
1278		return -EINVAL;
1279	}
1280
1281	spin_lock_irqsave(&edmac->lock, flags);
1282	edmac->runtime_addr = addr;
1283	edmac->runtime_ctrl = ctrl;
1284	spin_unlock_irqrestore(&edmac->lock, flags);
1285
1286	return 0;
1287}
1288
1289/**
1290 * ep93xx_dma_tx_status - check if a transaction is completed
1291 * @chan: channel
1292 * @cookie: transaction specific cookie
1293 * @state: state of the transaction is stored here if given
1294 *
1295 * This function can be used to query state of a given transaction.
1296 */
1297static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1298					    dma_cookie_t cookie,
1299					    struct dma_tx_state *state)
1300{
1301	return dma_cookie_status(chan, cookie, state);
1302}
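/*
 * A minimal status-polling sketch (assuming @cookie came from a prior
 * dmaengine_submit() on this channel):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_COMPLETE)
 *		...
 */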
1303
1304/**
1305 * ep93xx_dma_issue_pending - push pending transactions to the hardware
1306 * @chan: channel
1307 *
1308 * When this function is called, all pending transactions are pushed to the
1309 * hardware and executed.
1310 */
1311static void ep93xx_dma_issue_pending(struct dma_chan *chan)
1312{
1313	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
1314}
1315
1316static int __init ep93xx_dma_probe(struct platform_device *pdev)
1317{
1318	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1319	struct ep93xx_dma_engine *edma;
1320	struct dma_device *dma_dev;
1321	size_t edma_size;
1322	int ret, i;
1323
1324	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
1325	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
1326	if (!edma)
1327		return -ENOMEM;
1328
1329	dma_dev = &edma->dma_dev;
1330	edma->m2m = platform_get_device_id(pdev)->driver_data;
1331	edma->num_channels = pdata->num_channels;
1332
1333	INIT_LIST_HEAD(&dma_dev->channels);
1334	for (i = 0; i < pdata->num_channels; i++) {
1335		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
1336		struct ep93xx_dma_chan *edmac = &edma->channels[i];
1337
1338		edmac->chan.device = dma_dev;
1339		edmac->regs = cdata->base;
1340		edmac->irq = cdata->irq;
1341		edmac->edma = edma;
1342
1343		edmac->clk = clk_get(NULL, cdata->name);
1344		if (IS_ERR(edmac->clk)) {
1345			dev_warn(&pdev->dev, "failed to get clock for %s\n",
1346				 cdata->name);
1347			continue;
1348		}
1349
1350		spin_lock_init(&edmac->lock);
1351		INIT_LIST_HEAD(&edmac->active);
1352		INIT_LIST_HEAD(&edmac->queue);
1353		INIT_LIST_HEAD(&edmac->free_list);
1354		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
1355			     (unsigned long)edmac);
1356
1357		list_add_tail(&edmac->chan.device_node,
1358			      &dma_dev->channels);
1359	}
1360
1361	dma_cap_zero(dma_dev->cap_mask);
1362	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1363	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
1364
1365	dma_dev->dev = &pdev->dev;
1366	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
1367	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1368	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1369	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1370	dma_dev->device_config = ep93xx_dma_slave_config;
1371	dma_dev->device_synchronize = ep93xx_dma_synchronize;
1372	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1373	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1374	dma_dev->device_tx_status = ep93xx_dma_tx_status;
1375
1376	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
1377
1378	if (edma->m2m) {
1379		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1380		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
1381
1382		edma->hw_setup = m2m_hw_setup;
1383		edma->hw_shutdown = m2m_hw_shutdown;
1384		edma->hw_submit = m2m_hw_submit;
1385		edma->hw_interrupt = m2m_hw_interrupt;
1386	} else {
1387		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1388
1389		edma->hw_synchronize = m2p_hw_synchronize;
1390		edma->hw_setup = m2p_hw_setup;
1391		edma->hw_shutdown = m2p_hw_shutdown;
1392		edma->hw_submit = m2p_hw_submit;
1393		edma->hw_interrupt = m2p_hw_interrupt;
1394	}
1395
1396	ret = dma_async_device_register(dma_dev);
1397	if (unlikely(ret)) {
1398		for (i = 0; i < edma->num_channels; i++) {
1399			struct ep93xx_dma_chan *edmac = &edma->channels[i];
1400			if (!IS_ERR_OR_NULL(edmac->clk))
1401				clk_put(edmac->clk);
1402		}
1403		kfree(edma);
1404	} else {
1405		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
1406			 edma->m2m ? "M" : "P");
1407	}
1408
1409	return ret;
1410}
1411
1412static const struct platform_device_id ep93xx_dma_driver_ids[] = {
1413	{ "ep93xx-dma-m2p", 0 },
1414	{ "ep93xx-dma-m2m", 1 },
1415	{ },
1416};
1417
1418static struct platform_driver ep93xx_dma_driver = {
1419	.driver		= {
1420		.name	= "ep93xx-dma",
1421	},
1422	.id_table	= ep93xx_dma_driver_ids,
1423};
1424
1425static int __init ep93xx_dma_module_init(void)
1426{
1427	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
1428}
1429subsys_initcall(ep93xx_dma_module_init);
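/*
 * Registering at subsys_initcall() time (rather than as a regular module
 * init) makes the DMA channels available before the client drivers that
 * depend on them start probing.
 */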
1430
1431MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
1432MODULE_DESCRIPTION("EP93xx DMA driver");
1433MODULE_LICENSE("GPL");