v3.15
 
   1/*
   2 * Driver for the Cirrus Logic EP93xx DMA Controller
   3 *
   4 * Copyright (C) 2011 Mika Westerberg
   5 *
   6 * DMA M2P implementation is based on the original
   7 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
   8 *
   9 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
  10 *   Copyright (C) 2006 Applied Data Systems
  11 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
  12 *
  13 * This driver is based on dw_dmac and amba-pl08x drivers.
  14 *
  15 * This program is free software; you can redistribute it and/or modify
  16 * it under the terms of the GNU General Public License as published by
  17 * the Free Software Foundation; either version 2 of the License, or
  18 * (at your option) any later version.
  19 */
  20
  21#include <linux/clk.h>
  22#include <linux/init.h>
  23#include <linux/interrupt.h>
  24#include <linux/dmaengine.h>
  25#include <linux/module.h>
  26#include <linux/platform_device.h>
  27#include <linux/slab.h>
  28
  29#include <linux/platform_data/dma-ep93xx.h>
  30
  31#include "dmaengine.h"
  32
  33/* M2P registers */
  34#define M2P_CONTROL			0x0000
  35#define M2P_CONTROL_STALLINT		BIT(0)
  36#define M2P_CONTROL_NFBINT		BIT(1)
  37#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
  38#define M2P_CONTROL_ENABLE		BIT(4)
  39#define M2P_CONTROL_ICE			BIT(6)
  40
  41#define M2P_INTERRUPT			0x0004
  42#define M2P_INTERRUPT_STALL		BIT(0)
  43#define M2P_INTERRUPT_NFB		BIT(1)
  44#define M2P_INTERRUPT_ERROR		BIT(3)
  45
  46#define M2P_PPALLOC			0x0008
  47#define M2P_STATUS			0x000c
  48
  49#define M2P_MAXCNT0			0x0020
  50#define M2P_BASE0			0x0024
  51#define M2P_MAXCNT1			0x0030
  52#define M2P_BASE1			0x0034
  53
  54#define M2P_STATE_IDLE			0
  55#define M2P_STATE_STALL			1
  56#define M2P_STATE_ON			2
  57#define M2P_STATE_NEXT			3
  58
  59/* M2M registers */
  60#define M2M_CONTROL			0x0000
  61#define M2M_CONTROL_DONEINT		BIT(2)
  62#define M2M_CONTROL_ENABLE		BIT(3)
  63#define M2M_CONTROL_START		BIT(4)
  64#define M2M_CONTROL_DAH			BIT(11)
  65#define M2M_CONTROL_SAH			BIT(12)
  66#define M2M_CONTROL_PW_SHIFT		9
  67#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
  68#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
  69#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
  70#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
  71#define M2M_CONTROL_TM_SHIFT		13
  72#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
  73#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
  74#define M2M_CONTROL_NFBINT		BIT(21)
  75#define M2M_CONTROL_RSS_SHIFT		22
  76#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
  77#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
  78#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
  79#define M2M_CONTROL_NO_HDSK		BIT(24)
  80#define M2M_CONTROL_PWSC_SHIFT		25
  81
  82#define M2M_INTERRUPT			0x0004
  83#define M2M_INTERRUPT_MASK		6
  84
  85#define M2M_STATUS			0x000c
  86#define M2M_STATUS_CTL_SHIFT		1
  87#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
  88#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
  89#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
  90#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
  91#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
  92#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
  93#define M2M_STATUS_BUF_SHIFT		4
  94#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
  95#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
  96#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
  97#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
  98#define M2M_STATUS_DONE			BIT(6)
  99
 100#define M2M_BCR0			0x0010
 101#define M2M_BCR1			0x0014
 102#define M2M_SAR_BASE0			0x0018
 103#define M2M_SAR_BASE1			0x001c
 104#define M2M_DAR_BASE0			0x002c
 105#define M2M_DAR_BASE1			0x0030
 106
 107#define DMA_MAX_CHAN_BYTES		0xffff
 108#define DMA_MAX_CHAN_DESCRIPTORS	32
 109
 110struct ep93xx_dma_engine;
 111
 112/**
 113 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 114 * @src_addr: source address of the transaction
 115 * @dst_addr: destination address of the transaction
 116 * @size: size of the transaction (in bytes)
 117 * @complete: this descriptor is completed
 118 * @txd: dmaengine API descriptor
 119 * @tx_list: list of linked descriptors
 120 * @node: link used for putting this into a channel queue
 121 */
 122struct ep93xx_dma_desc {
 123	u32				src_addr;
 124	u32				dst_addr;
 125	size_t				size;
 126	bool				complete;
 127	struct dma_async_tx_descriptor	txd;
 128	struct list_head		tx_list;
 129	struct list_head		node;
 130};
 131
 132/**
 133 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 134 * @chan: dmaengine API channel
 135 * @edma: pointer to the engine device
 136 * @regs: memory mapped registers
 137 * @irq: interrupt number of the channel
 138 * @clk: clock used by this channel
 139 * @tasklet: channel specific tasklet used for callbacks
 140 * @lock: lock protecting the fields following
 141 * @flags: flags for the channel
 142 * @buffer: which buffer to use next (0/1)
 143 * @active: flattened chain of descriptors currently being processed
 144 * @queue: pending descriptors which are handled next
 145 * @free_list: list of free descriptors which can be used
 146 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 147 *                is set via %DMA_SLAVE_CONFIG before slave operation is
 148 *                prepared
 149 * @runtime_ctrl: M2M runtime values for the control register.
 150 *
 151 * As EP93xx DMA controller doesn't support real chained DMA descriptors we
 152 * will have slightly different scheme here: @active points to a head of
 153 * flattened DMA descriptor chain.
 154 *
 155 * @queue holds pending transactions. These are linked through the first
 156 * descriptor in the chain. When a descriptor is moved to the @active queue,
 157 * the first and chained descriptors are flattened into a single list.
 158 *
 159 * @chan.private holds pointer to &struct ep93xx_dma_data which contains
 160 * necessary channel configuration information. For memcpy channels this must
 161 * be %NULL.
 162 */
 163struct ep93xx_dma_chan {
 164	struct dma_chan			chan;
 165	const struct ep93xx_dma_engine	*edma;
 166	void __iomem			*regs;
 167	int				irq;
 168	struct clk			*clk;
 169	struct tasklet_struct		tasklet;
 170	/* protects the fields following */
 171	spinlock_t			lock;
 172	unsigned long			flags;
 173/* Channel is configured for cyclic transfers */
 174#define EP93XX_DMA_IS_CYCLIC		0
 175
 176	int				buffer;
 177	struct list_head		active;
 178	struct list_head		queue;
 179	struct list_head		free_list;
 180	u32				runtime_addr;
 181	u32				runtime_ctrl;
 182};
 183
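For reference, a minimal client-side sketch of how the &struct ep93xx_dma_data mentioned above typically ends up in @chan.private: the client passes it as the dmaengine filter parameter and the filter stashes it before ep93xx_dma_alloc_chan_resources() runs. The function names and the AAC1/TX settings below are illustrative assumptions, not part of this driver; a real filter would also verify that the candidate channel is the wanted EP93xx M2P channel with a matching direction.

static bool example_ep93xx_dma_filter(struct dma_chan *chan, void *filter_param)
{
	/* Hypothetical filter: stash the config so it shows up in chan->private */
	chan->private = filter_param;
	return true;
}

static struct dma_chan *example_request_aac1_tx_channel(void)
{
	static struct ep93xx_dma_data data = {
		.port		= EP93XX_DMA_AAC1,
		.direction	= DMA_MEM_TO_DEV,
		.name		= "aac1-tx",
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_ep93xx_dma_filter, &data);
}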
 184/**
 185 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 186 * @dma_dev: holds the dmaengine device
 187 * @m2m: is this an M2M or M2P device
 188 * @hw_setup: method which sets the channel up for operation
 189 * @hw_shutdown: shuts the channel down and flushes whatever is left
 190 * @hw_submit: pushes active descriptor(s) to the hardware
 191 * @hw_interrupt: handle the interrupt
 192 * @num_channels: number of channels for this instance
 193 * @channels: array of channels
 194 *
 195 * There is one instance of this struct for the M2P channels and one for the
 196 * M2M channels. hw_xxx() methods are used to perform operations which are
 197 * different on M2M and M2P channels. These methods are called with channel
 198 * lock held and interrupts disabled so they cannot sleep.
 199 */
 200struct ep93xx_dma_engine {
 201	struct dma_device	dma_dev;
 202	bool			m2m;
 203	int			(*hw_setup)(struct ep93xx_dma_chan *);
 204	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
 205	void			(*hw_submit)(struct ep93xx_dma_chan *);
 206	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
 207#define INTERRUPT_UNKNOWN	0
 208#define INTERRUPT_DONE		1
 209#define INTERRUPT_NEXT_BUFFER	2
 210
 211	size_t			num_channels;
 212	struct ep93xx_dma_chan	channels[];
 213};
 214
 215static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
 216{
 217	return &edmac->chan.dev->device;
 218}
 219
 220static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
 221{
 222	return container_of(chan, struct ep93xx_dma_chan, chan);
 223}
 224
 225/**
 226 * ep93xx_dma_set_active - set new active descriptor chain
 227 * @edmac: channel
 228 * @desc: head of the new active descriptor chain
 229 *
 230 * Sets @desc to be the head of the new active descriptor chain. This is the
 231 * chain which is processed next. The active list must be empty before calling
 232 * this function.
 233 *
 234 * Called with @edmac->lock held and interrupts disabled.
 235 */
 236static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
 237				  struct ep93xx_dma_desc *desc)
 238{
 239	BUG_ON(!list_empty(&edmac->active));
 240
 241	list_add_tail(&desc->node, &edmac->active);
 242
 243	/* Flatten the @desc->tx_list chain into @edmac->active list */
 244	while (!list_empty(&desc->tx_list)) {
 245		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
 246			struct ep93xx_dma_desc, node);
 247
 248		/*
 249		 * We copy the callback parameters from the first descriptor
 250		 * to all the chained descriptors. This way we can call the
 251		 * callback without having to find out the first descriptor in
 252		 * the chain. Useful for cyclic transfers.
 253		 */
 254		d->txd.callback = desc->txd.callback;
 255		d->txd.callback_param = desc->txd.callback_param;
 256
 257		list_move_tail(&d->node, &edmac->active);
 258	}
 259}
 260
 261/* Called with @edmac->lock held and interrupts disabled */
 262static struct ep93xx_dma_desc *
 263ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 264{
 265	if (list_empty(&edmac->active))
 266		return NULL;
 267
 268	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
 269}
 270
 271/**
 272 * ep93xx_dma_advance_active - advances to the next active descriptor
 273 * @edmac: channel
 274 *
 275 * Function advances active descriptor to the next in the @edmac->active and
 276 * returns %true if we still have descriptors in the chain to process.
 277 * Otherwise returns %false.
 278 *
 279 * When the channel is in cyclic mode always returns %true.
 280 *
 281 * Called with @edmac->lock held and interrupts disabled.
 282 */
 283static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
 284{
 285	struct ep93xx_dma_desc *desc;
 286
 287	list_rotate_left(&edmac->active);
 288
 289	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 290		return true;
 291
 292	desc = ep93xx_dma_get_active(edmac);
 293	if (!desc)
 294		return false;
 295
 296	/*
 297	 * If txd.cookie is set it means that we are back in the first
 298	 * descriptor in the chain and hence done with it.
 299	 */
 300	return !desc->txd.cookie;
 301}
 302
 303/*
 304 * M2P DMA implementation
 305 */
 306
 307static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
 308{
 309	writel(control, edmac->regs + M2P_CONTROL);
 310	/*
 311	 * EP93xx User's Guide states that we must perform a dummy read after
 312	 * write to the control register.
 313	 */
 314	readl(edmac->regs + M2P_CONTROL);
 315}
 316
 317static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
 318{
 319	struct ep93xx_dma_data *data = edmac->chan.private;
 320	u32 control;
 321
 322	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
 323
 324	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
 325		| M2P_CONTROL_ENABLE;
 326	m2p_set_control(edmac, control);
 327
 328	return 0;
 329}
 330
 331static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
 332{
 333	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 334}
 335
 336static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
 337{
 338	u32 control;
 339
 340	control = readl(edmac->regs + M2P_CONTROL);
 341	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 342	m2p_set_control(edmac, control);
 343
 344	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
 345		cpu_relax();
 346
 347	m2p_set_control(edmac, 0);
 348
 349	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
 350		cpu_relax();
 351}
 352
 353static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
 354{
 355	struct ep93xx_dma_desc *desc;
 356	u32 bus_addr;
 357
 358	desc = ep93xx_dma_get_active(edmac);
 359	if (!desc) {
 360		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
 361		return;
 362	}
 363
 364	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
 365		bus_addr = desc->src_addr;
 366	else
 367		bus_addr = desc->dst_addr;
 368
 369	if (edmac->buffer == 0) {
 370		writel(desc->size, edmac->regs + M2P_MAXCNT0);
 371		writel(bus_addr, edmac->regs + M2P_BASE0);
 372	} else {
 373		writel(desc->size, edmac->regs + M2P_MAXCNT1);
 374		writel(bus_addr, edmac->regs + M2P_BASE1);
 375	}
 376
 377	edmac->buffer ^= 1;
 378}
 379
 380static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
 381{
 382	u32 control = readl(edmac->regs + M2P_CONTROL);
 383
 384	m2p_fill_desc(edmac);
 385	control |= M2P_CONTROL_STALLINT;
 386
 387	if (ep93xx_dma_advance_active(edmac)) {
 388		m2p_fill_desc(edmac);
 389		control |= M2P_CONTROL_NFBINT;
 390	}
 391
 392	m2p_set_control(edmac, control);
 393}
 394
 395static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
 396{
 397	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
 398	u32 control;
 399
 400	if (irq_status & M2P_INTERRUPT_ERROR) {
 401		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
 402
 403		/* Clear the error interrupt */
 404		writel(1, edmac->regs + M2P_INTERRUPT);
 405
 406		/*
 407		 * It seems that there is no easy way of reporting errors back
 408		 * to client so we just report the error here and continue as
 409		 * usual.
 410		 *
 411		 * Revisit this when there is a mechanism to report back the
 412		 * errors.
 413		 */
 414		dev_err(chan2dev(edmac),
 415			"DMA transfer failed! Details:\n"
 416			"\tcookie	: %d\n"
 417			"\tsrc_addr	: 0x%08x\n"
 418			"\tdst_addr	: 0x%08x\n"
 419			"\tsize		: %zu\n",
 420			desc->txd.cookie, desc->src_addr, desc->dst_addr,
 421			desc->size);
 422	}
 423
 424	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
 425	case M2P_INTERRUPT_STALL:
 426		/* Disable interrupts */
 427		control = readl(edmac->regs + M2P_CONTROL);
 428		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 429		m2p_set_control(edmac, control);
 430
 431		return INTERRUPT_DONE;
 432
 433	case M2P_INTERRUPT_NFB:
 434		if (ep93xx_dma_advance_active(edmac))
 435			m2p_fill_desc(edmac);
 436
 437		return INTERRUPT_NEXT_BUFFER;
 438	}
 439
 440	return INTERRUPT_UNKNOWN;
 441}
 442
 443/*
 444 * M2M DMA implementation
 445 */
 446
 447static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
 448{
 449	const struct ep93xx_dma_data *data = edmac->chan.private;
 450	u32 control = 0;
 451
 452	if (!data) {
 453		/* This is memcpy channel, nothing to configure */
 454		writel(control, edmac->regs + M2M_CONTROL);
 455		return 0;
 456	}
 457
 458	switch (data->port) {
 459	case EP93XX_DMA_SSP:
 460		/*
 461		 * This was found via experimenting - anything less than 5
 462		 * causes the channel to perform only a partial transfer which
 463		 * leads to problems since we don't get DONE interrupt then.
 464		 */
 465		control = (5 << M2M_CONTROL_PWSC_SHIFT);
 466		control |= M2M_CONTROL_NO_HDSK;
 467
 468		if (data->direction == DMA_MEM_TO_DEV) {
 469			control |= M2M_CONTROL_DAH;
 470			control |= M2M_CONTROL_TM_TX;
 471			control |= M2M_CONTROL_RSS_SSPTX;
 472		} else {
 473			control |= M2M_CONTROL_SAH;
 474			control |= M2M_CONTROL_TM_RX;
 475			control |= M2M_CONTROL_RSS_SSPRX;
 476		}
 477		break;
 478
 479	case EP93XX_DMA_IDE:
 480		/*
 481		 * This IDE part is totally untested. Values below are taken
 482		 * from the EP93xx User's Guide and might not be correct.
 483		 */
 484		if (data->direction == DMA_MEM_TO_DEV) {
 485			/* Worst case from the UG */
 486			control = (3 << M2M_CONTROL_PWSC_SHIFT);
 487			control |= M2M_CONTROL_DAH;
 488			control |= M2M_CONTROL_TM_TX;
 489		} else {
 490			control = (2 << M2M_CONTROL_PWSC_SHIFT);
 491			control |= M2M_CONTROL_SAH;
 492			control |= M2M_CONTROL_TM_RX;
 493		}
 494
 495		control |= M2M_CONTROL_NO_HDSK;
 496		control |= M2M_CONTROL_RSS_IDE;
 497		control |= M2M_CONTROL_PW_16;
 498		break;
 499
 500	default:
 501		return -EINVAL;
 502	}
 503
 504	writel(control, edmac->regs + M2M_CONTROL);
 505	return 0;
 506}
 507
 508static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
 509{
 510	/* Just disable the channel */
 511	writel(0, edmac->regs + M2M_CONTROL);
 512}
 513
 514static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
 515{
 516	struct ep93xx_dma_desc *desc;
 517
 518	desc = ep93xx_dma_get_active(edmac);
 519	if (!desc) {
 520		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
 521		return;
 522	}
 523
 524	if (edmac->buffer == 0) {
 525		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
 526		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
 527		writel(desc->size, edmac->regs + M2M_BCR0);
 528	} else {
 529		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
 530		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
 531		writel(desc->size, edmac->regs + M2M_BCR1);
 532	}
 533
 534	edmac->buffer ^= 1;
 535}
 536
 537static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
 538{
 539	struct ep93xx_dma_data *data = edmac->chan.private;
 540	u32 control = readl(edmac->regs + M2M_CONTROL);
 541
 542	/*
 543	 * Since we allow clients to configure PW (peripheral width) we always
 544	 * clear PW bits here and then set them according what is given in
 545	 * the runtime configuration.
 546	 */
 547	control &= ~M2M_CONTROL_PW_MASK;
 548	control |= edmac->runtime_ctrl;
 549
 550	m2m_fill_desc(edmac);
 551	control |= M2M_CONTROL_DONEINT;
 552
 553	if (ep93xx_dma_advance_active(edmac)) {
 554		m2m_fill_desc(edmac);
 555		control |= M2M_CONTROL_NFBINT;
 556	}
 557
 558	/*
 559	 * Now we can finally enable the channel. For M2M channel this must be
 560	 * done _after_ the BCRx registers are programmed.
 561	 */
 562	control |= M2M_CONTROL_ENABLE;
 563	writel(control, edmac->regs + M2M_CONTROL);
 564
 565	if (!data) {
 566		/*
 567		 * For memcpy channels the software trigger must be asserted
 568		 * in order to start the memcpy operation.
 569		 */
 570		control |= M2M_CONTROL_START;
 571		writel(control, edmac->regs + M2M_CONTROL);
 572	}
 573}
 574
 575/*
 576 * According to EP93xx User's Guide, we should receive DONE interrupt when all
 577 * M2M DMA controller transactions complete normally. This is not always the
 578 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
 579 * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
 580 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
 581 * In effect, disabling the channel when only DONE bit is set could stop
 582 * currently running DMA transfer. To avoid this, we use Buffer FSM and
 583 * Control FSM to check current state of DMA channel.
 584 */
 585static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
 586{
 587	u32 status = readl(edmac->regs + M2M_STATUS);
 588	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
 589	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
 590	bool done = status & M2M_STATUS_DONE;
 591	bool last_done;
 592	u32 control;
 593	struct ep93xx_dma_desc *desc;
 594
 595	/* Accept only DONE and NFB interrupts */
 596	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
 597		return INTERRUPT_UNKNOWN;
 598
 599	if (done) {
 600		/* Clear the DONE bit */
 601		writel(0, edmac->regs + M2M_INTERRUPT);
 602	}
 603
 604	/*
 605	 * Check whether we are done with descriptors or not. This, together
 606	 * with DMA channel state, determines action to take in interrupt.
 607	 */
 608	desc = ep93xx_dma_get_active(edmac);
 609	last_done = !desc || desc->txd.cookie;
 610
 611	/*
 612	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
 613	 * DMA channel. Using DONE and NFB bits from channel status register
 614	 * or bits from channel interrupt register is not reliable.
 615	 */
 616	if (!last_done &&
 617	    (buf_fsm == M2M_STATUS_BUF_NO ||
 618	     buf_fsm == M2M_STATUS_BUF_ON)) {
 619		/*
 620		 * Two buffers are ready for update when Buffer FSM is in
 621		 * DMA_NO_BUF state. Only one buffer can be prepared without
 622		 * disabling the channel or polling the DONE bit.
 623		 * To simplify things, always prepare only one buffer.
 624		 */
 625		if (ep93xx_dma_advance_active(edmac)) {
 626			m2m_fill_desc(edmac);
 627			if (done && !edmac->chan.private) {
 628				/* Software trigger for memcpy channel */
 629				control = readl(edmac->regs + M2M_CONTROL);
 630				control |= M2M_CONTROL_START;
 631				writel(control, edmac->regs + M2M_CONTROL);
 632			}
 633			return INTERRUPT_NEXT_BUFFER;
 634		} else {
 635			last_done = true;
 636		}
 637	}
 638
 639	/*
 640	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
 641	 * and Control FSM is in DMA_STALL state.
 642	 */
 643	if (last_done &&
 644	    buf_fsm == M2M_STATUS_BUF_NO &&
 645	    ctl_fsm == M2M_STATUS_CTL_STALL) {
 646		/* Disable interrupts and the channel */
 647		control = readl(edmac->regs + M2M_CONTROL);
 648		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
 649			    | M2M_CONTROL_ENABLE);
 650		writel(control, edmac->regs + M2M_CONTROL);
 651		return INTERRUPT_DONE;
 652	}
 653
 654	/*
 655	 * Nothing to do this time.
 656	 */
 657	return INTERRUPT_NEXT_BUFFER;
 658}
 659
 660/*
 661 * DMA engine API implementation
 662 */
 663
 664static struct ep93xx_dma_desc *
 665ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
 666{
 667	struct ep93xx_dma_desc *desc, *_desc;
 668	struct ep93xx_dma_desc *ret = NULL;
 669	unsigned long flags;
 670
 671	spin_lock_irqsave(&edmac->lock, flags);
 672	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
 673		if (async_tx_test_ack(&desc->txd)) {
 674			list_del_init(&desc->node);
 675
 676			/* Re-initialize the descriptor */
 677			desc->src_addr = 0;
 678			desc->dst_addr = 0;
 679			desc->size = 0;
 680			desc->complete = false;
 681			desc->txd.cookie = 0;
 682			desc->txd.callback = NULL;
 683			desc->txd.callback_param = NULL;
 684
 685			ret = desc;
 686			break;
 687		}
 688	}
 689	spin_unlock_irqrestore(&edmac->lock, flags);
 690	return ret;
 691}
 692
 693static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
 694				struct ep93xx_dma_desc *desc)
 695{
 696	if (desc) {
 697		unsigned long flags;
 698
 699		spin_lock_irqsave(&edmac->lock, flags);
 700		list_splice_init(&desc->tx_list, &edmac->free_list);
 701		list_add(&desc->node, &edmac->free_list);
 702		spin_unlock_irqrestore(&edmac->lock, flags);
 703	}
 704}
 705
 706/**
 707 * ep93xx_dma_advance_work - start processing the next pending transaction
 708 * @edmac: channel
 709 *
 710 * If we have pending transactions queued and we are currently idling, this
 711 * function takes the next queued transaction from the @edmac->queue and
 712 * pushes it to the hardware for execution.
 713 */
 714static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
 715{
 716	struct ep93xx_dma_desc *new;
 717	unsigned long flags;
 718
 719	spin_lock_irqsave(&edmac->lock, flags);
 720	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
 721		spin_unlock_irqrestore(&edmac->lock, flags);
 722		return;
 723	}
 724
 725	/* Take the next descriptor from the pending queue */
 726	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
 727	list_del_init(&new->node);
 728
 729	ep93xx_dma_set_active(edmac, new);
 730
 731	/* Push it to the hardware */
 732	edmac->edma->hw_submit(edmac);
 733	spin_unlock_irqrestore(&edmac->lock, flags);
 734}
 735
 736static void ep93xx_dma_tasklet(unsigned long data)
 737{
 738	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
 739	struct ep93xx_dma_desc *desc, *d;
 740	dma_async_tx_callback callback = NULL;
 741	void *callback_param = NULL;
 742	LIST_HEAD(list);
 743
 744	spin_lock_irq(&edmac->lock);
 745	/*
 746	 * If dma_terminate_all() was called before we get to run, the active
 747	 * list has become empty. If that happens we aren't supposed to do
 748	 * anything more than call ep93xx_dma_advance_work().
 749	 */
 750	desc = ep93xx_dma_get_active(edmac);
 751	if (desc) {
 752		if (desc->complete) {
 753			/* mark descriptor complete for non cyclic case only */
 754			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 755				dma_cookie_complete(&desc->txd);
 756			list_splice_init(&edmac->active, &list);
 757		}
 758		callback = desc->txd.callback;
 759		callback_param = desc->txd.callback_param;
 760	}
 761	spin_unlock_irq(&edmac->lock);
 762
 763	/* Pick up the next descriptor from the queue */
 764	ep93xx_dma_advance_work(edmac);
 765
 766	/* Now we can release all the chained descriptors */
 767	list_for_each_entry_safe(desc, d, &list, node) {
 768		dma_descriptor_unmap(&desc->txd);
 769		ep93xx_dma_desc_put(edmac, desc);
 770	}
 771
 772	if (callback)
 773		callback(callback_param);
 774}
 775
 776static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
 777{
 778	struct ep93xx_dma_chan *edmac = dev_id;
 779	struct ep93xx_dma_desc *desc;
 780	irqreturn_t ret = IRQ_HANDLED;
 781
 782	spin_lock(&edmac->lock);
 783
 784	desc = ep93xx_dma_get_active(edmac);
 785	if (!desc) {
 786		dev_warn(chan2dev(edmac),
 787			 "got interrupt while active list is empty\n");
 788		spin_unlock(&edmac->lock);
 789		return IRQ_NONE;
 790	}
 791
 792	switch (edmac->edma->hw_interrupt(edmac)) {
 793	case INTERRUPT_DONE:
 794		desc->complete = true;
 795		tasklet_schedule(&edmac->tasklet);
 796		break;
 797
 798	case INTERRUPT_NEXT_BUFFER:
 799		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 800			tasklet_schedule(&edmac->tasklet);
 801		break;
 802
 803	default:
 804		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
 805		ret = IRQ_NONE;
 806		break;
 807	}
 808
 809	spin_unlock(&edmac->lock);
 810	return ret;
 811}
 812
 813/**
 814 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 815 * @tx: descriptor to be executed
 816 *
 817 * Function will execute given descriptor on the hardware or if the hardware
 818 * is busy, queue the descriptor to be executed later on. Returns cookie which
 819 * can be used to poll the status of the descriptor.
 820 */
 821static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 822{
 823	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
 824	struct ep93xx_dma_desc *desc;
 825	dma_cookie_t cookie;
 826	unsigned long flags;
 827
 828	spin_lock_irqsave(&edmac->lock, flags);
 829	cookie = dma_cookie_assign(tx);
 830
 831	desc = container_of(tx, struct ep93xx_dma_desc, txd);
 832
 833	/*
 834	 * If nothing is currently processed, we push this descriptor
 835	 * directly to the hardware. Otherwise we put the descriptor
 836	 * to the pending queue.
 837	 */
 838	if (list_empty(&edmac->active)) {
 839		ep93xx_dma_set_active(edmac, desc);
 840		edmac->edma->hw_submit(edmac);
 841	} else {
 842		list_add_tail(&desc->node, &edmac->queue);
 843	}
 844
 845	spin_unlock_irqrestore(&edmac->lock, flags);
 846	return cookie;
 847}
 848
 849/**
 850 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 851 * @chan: channel to allocate resources
 852 *
 853 * Function allocates necessary resources for the given DMA channel and
 854 * returns number of allocated descriptors for the channel. Negative errno
 855 * is returned in case of failure.
 856 */
 857static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
 858{
 859	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 860	struct ep93xx_dma_data *data = chan->private;
 861	const char *name = dma_chan_name(chan);
 862	int ret, i;
 863
 864	/* Sanity check the channel parameters */
 865	if (!edmac->edma->m2m) {
 866		if (!data)
 867			return -EINVAL;
 868		if (data->port < EP93XX_DMA_I2S1 ||
 869		    data->port > EP93XX_DMA_IRDA)
 870			return -EINVAL;
 871		if (data->direction != ep93xx_dma_chan_direction(chan))
 872			return -EINVAL;
 873	} else {
 874		if (data) {
 875			switch (data->port) {
 876			case EP93XX_DMA_SSP:
 877			case EP93XX_DMA_IDE:
 878				if (!is_slave_direction(data->direction))
 879					return -EINVAL;
 880				break;
 881			default:
 882				return -EINVAL;
 883			}
 884		}
 885	}
 886
 887	if (data && data->name)
 888		name = data->name;
 889
 890	ret = clk_enable(edmac->clk);
 891	if (ret)
 892		return ret;
 893
 894	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
 895	if (ret)
 896		goto fail_clk_disable;
 897
 898	spin_lock_irq(&edmac->lock);
 899	dma_cookie_init(&edmac->chan);
 900	ret = edmac->edma->hw_setup(edmac);
 901	spin_unlock_irq(&edmac->lock);
 902
 903	if (ret)
 904		goto fail_free_irq;
 905
 906	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
 907		struct ep93xx_dma_desc *desc;
 908
 909		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 910		if (!desc) {
 911			dev_warn(chan2dev(edmac), "not enough descriptors\n");
 912			break;
 913		}
 914
 915		INIT_LIST_HEAD(&desc->tx_list);
 916
 917		dma_async_tx_descriptor_init(&desc->txd, chan);
 918		desc->txd.flags = DMA_CTRL_ACK;
 919		desc->txd.tx_submit = ep93xx_dma_tx_submit;
 920
 921		ep93xx_dma_desc_put(edmac, desc);
 922	}
 923
 924	return i;
 925
 926fail_free_irq:
 927	free_irq(edmac->irq, edmac);
 928fail_clk_disable:
 929	clk_disable(edmac->clk);
 930
 931	return ret;
 932}
 933
 934/**
 935 * ep93xx_dma_free_chan_resources - release resources for the channel
 936 * @chan: channel
 937 *
 938 * Function releases all the resources allocated for the given channel.
 939 * The channel must be idle when this is called.
 940 */
 941static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
 942{
 943	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 944	struct ep93xx_dma_desc *desc, *d;
 945	unsigned long flags;
 946	LIST_HEAD(list);
 947
 948	BUG_ON(!list_empty(&edmac->active));
 949	BUG_ON(!list_empty(&edmac->queue));
 950
 951	spin_lock_irqsave(&edmac->lock, flags);
 952	edmac->edma->hw_shutdown(edmac);
 953	edmac->runtime_addr = 0;
 954	edmac->runtime_ctrl = 0;
 955	edmac->buffer = 0;
 956	list_splice_init(&edmac->free_list, &list);
 957	spin_unlock_irqrestore(&edmac->lock, flags);
 958
 959	list_for_each_entry_safe(desc, d, &list, node)
 960		kfree(desc);
 961
 962	clk_disable(edmac->clk);
 963	free_irq(edmac->irq, edmac);
 964}
 965
 966/**
 967 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 968 * @chan: channel
 969 * @dest: destination bus address
 970 * @src: source bus address
 971 * @len: size of the transaction
 972 * @flags: flags for the descriptor
 973 *
 974 * Returns a valid DMA descriptor or %NULL in case of failure.
 975 */
 976static struct dma_async_tx_descriptor *
 977ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 978			   dma_addr_t src, size_t len, unsigned long flags)
 979{
 980	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 981	struct ep93xx_dma_desc *desc, *first;
 982	size_t bytes, offset;
 983
 984	first = NULL;
 985	for (offset = 0; offset < len; offset += bytes) {
 986		desc = ep93xx_dma_desc_get(edmac);
 987		if (!desc) {
 988			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
 989			goto fail;
 990		}
 991
 992		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
 993
 994		desc->src_addr = src + offset;
 995		desc->dst_addr = dest + offset;
 996		desc->size = bytes;
 997
 998		if (!first)
 999			first = desc;
1000		else
1001			list_add_tail(&desc->node, &first->tx_list);
1002	}
1003
1004	first->txd.cookie = -EBUSY;
1005	first->txd.flags = flags;
1006
1007	return &first->txd;
1008fail:
1009	ep93xx_dma_desc_put(edmac, first);
1010	return NULL;
1011}
1012
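A hedged sketch of how a memcpy client might reach the routine above; for memcpy channels @chan.private stays %NULL and the channel is requested with only the DMA_MEMCPY capability. The helper name is hypothetical and error handling is trimmed.

static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;

	/* Calls ep93xx_dma_prep_dma_memcpy() on an M2M channel */
	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);			/* ep93xx_dma_tx_submit() */
	dma_async_issue_pending(chan);		/* ep93xx_dma_issue_pending() */
	return 0;
}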
1013/**
1014 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
1015 * @chan: channel
1016 * @sgl: list of buffers to transfer
1017 * @sg_len: number of entries in @sgl
1018 * @dir: direction of the DMA transfer
1019 * @flags: flags for the descriptor
1020 * @context: operation context (ignored)
1021 *
1022 * Returns a valid DMA descriptor or %NULL in case of failure.
1023 */
1024static struct dma_async_tx_descriptor *
1025ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1026			 unsigned int sg_len, enum dma_transfer_direction dir,
1027			 unsigned long flags, void *context)
1028{
1029	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1030	struct ep93xx_dma_desc *desc, *first;
1031	struct scatterlist *sg;
1032	int i;
1033
1034	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1035		dev_warn(chan2dev(edmac),
1036			 "channel was configured with different direction\n");
1037		return NULL;
1038	}
1039
1040	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1041		dev_warn(chan2dev(edmac),
1042			 "channel is already used for cyclic transfers\n");
1043		return NULL;
1044	}
1045
1046	first = NULL;
1047	for_each_sg(sgl, sg, sg_len, i) {
1048		size_t sg_len = sg_dma_len(sg);
1049
1050		if (sg_len > DMA_MAX_CHAN_BYTES) {
1051			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
1052				 sg_len);
1053			goto fail;
1054		}
1055
1056		desc = ep93xx_dma_desc_get(edmac);
1057		if (!desc) {
1058			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1059			goto fail;
1060		}
1061
1062		if (dir == DMA_MEM_TO_DEV) {
1063			desc->src_addr = sg_dma_address(sg);
1064			desc->dst_addr = edmac->runtime_addr;
1065		} else {
1066			desc->src_addr = edmac->runtime_addr;
1067			desc->dst_addr = sg_dma_address(sg);
1068		}
1069		desc->size = sg_len;
1070
1071		if (!first)
1072			first = desc;
1073		else
1074			list_add_tail(&desc->node, &first->tx_list);
1075	}
1076
1077	first->txd.cookie = -EBUSY;
1078	first->txd.flags = flags;
1079
1080	return &first->txd;
1081
1082fail:
1083	ep93xx_dma_desc_put(edmac, first);
1084	return NULL;
1085}
1086
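A hedged client-side sketch of the slave scatter-gather path above: @sgl is assumed to be already mapped with dma_map_sg() and the channel direction to match DMA_MEM_TO_DEV. The helper name is hypothetical.

static int example_submit_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len, dma_cookie_t *cookie)
{
	struct dma_async_tx_descriptor *txd;

	/* Builds the flattened descriptor chain via ep93xx_dma_prep_slave_sg() */
	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				      DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	*cookie = dmaengine_submit(txd);	/* lands in ep93xx_dma_tx_submit() */
	if (dma_submit_error(*cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* ep93xx_dma_issue_pending() */
	return 0;
}

Completion can then be waited for via the descriptor callback or polled with dma_async_is_tx_complete(chan, *cookie, NULL, NULL), which ends up in ep93xx_dma_tx_status().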
1087/**
1088 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
1089 * @chan: channel
1090 * @dma_addr: DMA mapped address of the buffer
1091 * @buf_len: length of the buffer (in bytes)
1092 * @period_len: length of a single period
1093 * @dir: direction of the operation
1094 * @flags: tx descriptor status flags
1095 * @context: operation context (ignored)
1096 *
1097 * Prepares a descriptor for cyclic DMA operation. This means that once the
1098 * descriptor is submitted, we will keep submitting @period_len sized
1099 * buffers and calling the callback once each period has elapsed. Transfer
1100 * terminates only when client calls dmaengine_terminate_all() for this
1101 * channel.
1102 *
1103 * Returns a valid DMA descriptor or %NULL in case of failure.
1104 */
1105static struct dma_async_tx_descriptor *
1106ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1107			   size_t buf_len, size_t period_len,
1108			   enum dma_transfer_direction dir, unsigned long flags,
1109			   void *context)
1110{
1111	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1112	struct ep93xx_dma_desc *desc, *first;
1113	size_t offset = 0;
1114
1115	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1116		dev_warn(chan2dev(edmac),
1117			 "channel was configured with different direction\n");
1118		return NULL;
1119	}
1120
1121	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1122		dev_warn(chan2dev(edmac),
1123			 "channel is already used for cyclic transfers\n");
1124		return NULL;
1125	}
1126
1127	if (period_len > DMA_MAX_CHAN_BYTES) {
1128		dev_warn(chan2dev(edmac), "too big period length %zu\n",
1129			 period_len);
1130		return NULL;
1131	}
1132
 
1133	/* Split the buffer into period size chunks */
1134	first = NULL;
1135	for (offset = 0; offset < buf_len; offset += period_len) {
1136		desc = ep93xx_dma_desc_get(edmac);
1137		if (!desc) {
1138			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1139			goto fail;
1140		}
1141
1142		if (dir == DMA_MEM_TO_DEV) {
1143			desc->src_addr = dma_addr + offset;
1144			desc->dst_addr = edmac->runtime_addr;
1145		} else {
1146			desc->src_addr = edmac->runtime_addr;
1147			desc->dst_addr = dma_addr + offset;
1148		}
1149
1150		desc->size = period_len;
1151
1152		if (!first)
1153			first = desc;
1154		else
1155			list_add_tail(&desc->node, &first->tx_list);
1156	}
1157
1158	first->txd.cookie = -EBUSY;
1159
1160	return &first->txd;
1161
1162fail:
1163	ep93xx_dma_desc_put(edmac, first);
1164	return NULL;
1165}
1166
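A hedged sketch of how a client (for example an audio PCM driver) might use the cyclic operation described above; the helper name and the choice of DMA_MEM_TO_DEV are assumptions.

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback period_cb, void *cb_data)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = period_cb;	/* invoked once per elapsed period */
	txd->callback_param = cb_data;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}

The stream keeps running until the client calls dmaengine_terminate_all(chan), which is routed to ep93xx_dma_terminate_all() below.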
1167/**
1168 * ep93xx_dma_terminate_all - terminate all transactions
1169 * @edmac: channel
1170 *
1171 * Stops all DMA transactions. All descriptors are put back to the
1172 * @edmac->free_list and callbacks are _not_ called.
1173 */
1174static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
1175{
1176	struct ep93xx_dma_desc *desc, *_d;
1177	unsigned long flags;
1178	LIST_HEAD(list);
1179
1180	spin_lock_irqsave(&edmac->lock, flags);
1181	/* First we disable and flush the DMA channel */
1182	edmac->edma->hw_shutdown(edmac);
1183	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
1184	list_splice_init(&edmac->active, &list);
1185	list_splice_init(&edmac->queue, &list);
1186	/*
1187	 * We then re-enable the channel. This way we can continue submitting
1188	 * the descriptors by just calling ->hw_submit() again.
1189	 */
1190	edmac->edma->hw_setup(edmac);
1191	spin_unlock_irqrestore(&edmac->lock, flags);
1192
1193	list_for_each_entry_safe(desc, _d, &list, node)
1194		ep93xx_dma_desc_put(edmac, desc);
1195
1196	return 0;
1197}
1198
1199static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
1200				   struct dma_slave_config *config)
1201{
1202	enum dma_slave_buswidth width;
1203	unsigned long flags;
1204	u32 addr, ctrl;
1205
1206	if (!edmac->edma->m2m)
1207		return -EINVAL;
1208
1209	switch (config->direction) {
1210	case DMA_DEV_TO_MEM:
1211		width = config->src_addr_width;
1212		addr = config->src_addr;
1213		break;
1214
1215	case DMA_MEM_TO_DEV:
1216		width = config->dst_addr_width;
1217		addr = config->dst_addr;
1218		break;
1219
1220	default:
1221		return -EINVAL;
1222	}
1223
1224	switch (width) {
1225	case DMA_SLAVE_BUSWIDTH_1_BYTE:
1226		ctrl = 0;
1227		break;
1228	case DMA_SLAVE_BUSWIDTH_2_BYTES:
1229		ctrl = M2M_CONTROL_PW_16;
1230		break;
1231	case DMA_SLAVE_BUSWIDTH_4_BYTES:
1232		ctrl = M2M_CONTROL_PW_32;
1233		break;
1234	default:
1235		return -EINVAL;
1236	}
1237
1238	spin_lock_irqsave(&edmac->lock, flags);
1239	edmac->runtime_addr = addr;
1240	edmac->runtime_ctrl = ctrl;
1241	spin_unlock_irqrestore(&edmac->lock, flags);
1242
1243	return 0;
1244}
1245
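For completeness, a hedged sketch of the slave configuration an M2M SSP client might supply; the helper name and the caller-provided register address are assumptions. The resulting values end up in @runtime_addr and @runtime_ctrl above.

static int example_config_ssp_tx(struct dma_chan *chan, dma_addr_t sspdr_phys)
{
	struct dma_slave_config cfg = { };

	cfg.direction		= DMA_MEM_TO_DEV;
	cfg.dst_addr		= sspdr_phys;	/* physical address of the SSP data register */
	cfg.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES;

	/* Routed to ep93xx_dma_slave_config() via the DMA_SLAVE_CONFIG command */
	return dmaengine_slave_config(chan, &cfg);
}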
1246/**
1247 * ep93xx_dma_control - manipulate all pending operations on a channel
1248 * @chan: channel
1249 * @cmd: control command to perform
1250 * @arg: optional argument
1251 *
1252 * Controls the channel. Function returns %0 in case of success or negative
1253 * error in case of failure.
1254 */
1255static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1256			      unsigned long arg)
1257{
1258	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1259	struct dma_slave_config *config;
1260
1261	switch (cmd) {
1262	case DMA_TERMINATE_ALL:
1263		return ep93xx_dma_terminate_all(edmac);
1264
1265	case DMA_SLAVE_CONFIG:
1266		config = (struct dma_slave_config *)arg;
1267		return ep93xx_dma_slave_config(edmac, config);
1268
1269	default:
1270		break;
1271	}
1272
1273	return -ENOSYS;
1274}
1275
1276/**
1277 * ep93xx_dma_tx_status - check if a transaction is completed
1278 * @chan: channel
1279 * @cookie: transaction specific cookie
1280 * @state: state of the transaction is stored here if given
1281 *
1282 * This function can be used to query state of a given transaction.
1283 */
1284static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1285					    dma_cookie_t cookie,
1286					    struct dma_tx_state *state)
1287{
1288	return dma_cookie_status(chan, cookie, state);
1289}
1290
1291/**
1292 * ep93xx_dma_issue_pending - push pending transactions to the hardware
1293 * @chan: channel
1294 *
1295 * When this function is called, all pending transactions are pushed to the
1296 * hardware and executed.
1297 */
1298static void ep93xx_dma_issue_pending(struct dma_chan *chan)
1299{
1300	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
1301}
1302
1303static int __init ep93xx_dma_probe(struct platform_device *pdev)
1304{
1305	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1306	struct ep93xx_dma_engine *edma;
1307	struct dma_device *dma_dev;
1308	size_t edma_size;
1309	int ret, i;
1310
1311	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
1312	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
1313	if (!edma)
1314		return -ENOMEM;
1315
1316	dma_dev = &edma->dma_dev;
1317	edma->m2m = platform_get_device_id(pdev)->driver_data;
1318	edma->num_channels = pdata->num_channels;
1319
1320	INIT_LIST_HEAD(&dma_dev->channels);
1321	for (i = 0; i < pdata->num_channels; i++) {
1322		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
1323		struct ep93xx_dma_chan *edmac = &edma->channels[i];
1324
1325		edmac->chan.device = dma_dev;
1326		edmac->regs = cdata->base;
1327		edmac->irq = cdata->irq;
1328		edmac->edma = edma;
1329
1330		edmac->clk = clk_get(NULL, cdata->name);
1331		if (IS_ERR(edmac->clk)) {
1332			dev_warn(&pdev->dev, "failed to get clock for %s\n",
1333				 cdata->name);
1334			continue;
1335		}
1336
1337		spin_lock_init(&edmac->lock);
1338		INIT_LIST_HEAD(&edmac->active);
1339		INIT_LIST_HEAD(&edmac->queue);
1340		INIT_LIST_HEAD(&edmac->free_list);
1341		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
1342			     (unsigned long)edmac);
1343
1344		list_add_tail(&edmac->chan.device_node,
1345			      &dma_dev->channels);
1346	}
1347
1348	dma_cap_zero(dma_dev->cap_mask);
1349	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1350	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
1351
1352	dma_dev->dev = &pdev->dev;
1353	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
1354	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1355	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1356	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1357	dma_dev->device_control = ep93xx_dma_control;
1358	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1359	dma_dev->device_tx_status = ep93xx_dma_tx_status;
1360
1361	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
1362
1363	if (edma->m2m) {
1364		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1365		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
1366
1367		edma->hw_setup = m2m_hw_setup;
1368		edma->hw_shutdown = m2m_hw_shutdown;
1369		edma->hw_submit = m2m_hw_submit;
1370		edma->hw_interrupt = m2m_hw_interrupt;
1371	} else {
1372		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1373
1374		edma->hw_setup = m2p_hw_setup;
1375		edma->hw_shutdown = m2p_hw_shutdown;
1376		edma->hw_submit = m2p_hw_submit;
1377		edma->hw_interrupt = m2p_hw_interrupt;
1378	}
1379
1380	ret = dma_async_device_register(dma_dev);
1381	if (unlikely(ret)) {
1382		for (i = 0; i < edma->num_channels; i++) {
1383			struct ep93xx_dma_chan *edmac = &edma->channels[i];
1384			if (!IS_ERR_OR_NULL(edmac->clk))
1385				clk_put(edmac->clk);
1386		}
1387		kfree(edma);
1388	} else {
1389		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
1390			 edma->m2m ? "M" : "P");
1391	}
1392
1393	return ret;
1394}
1395
1396static struct platform_device_id ep93xx_dma_driver_ids[] = {
1397	{ "ep93xx-dma-m2p", 0 },
1398	{ "ep93xx-dma-m2m", 1 },
1399	{ },
1400};
1401
1402static struct platform_driver ep93xx_dma_driver = {
1403	.driver		= {
1404		.name	= "ep93xx-dma",
1405	},
1406	.id_table	= ep93xx_dma_driver_ids,
1407};
1408
1409static int __init ep93xx_dma_module_init(void)
1410{
1411	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
1412}
1413subsys_initcall(ep93xx_dma_module_init);
1414
1415MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
1416MODULE_DESCRIPTION("EP93xx DMA driver");
1417MODULE_LICENSE("GPL");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Driver for the Cirrus Logic EP93xx DMA Controller
   4 *
   5 * Copyright (C) 2011 Mika Westerberg
   6 *
   7 * DMA M2P implementation is based on the original
   8 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
   9 *
  10 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
  11 *   Copyright (C) 2006 Applied Data Systems
  12 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
  13 *
  14 * This driver is based on dw_dmac and amba-pl08x drivers.
  15 */
  16
  17#include <linux/clk.h>
  18#include <linux/init.h>
  19#include <linux/interrupt.h>
  20#include <linux/dma-mapping.h>
  21#include <linux/dmaengine.h>
  22#include <linux/module.h>
  23#include <linux/mod_devicetable.h>
  24#include <linux/of_dma.h>
  25#include <linux/overflow.h>
  26#include <linux/platform_device.h>
  27#include <linux/slab.h>
  28
  29#include "dmaengine.h"
  30
  31/* M2P registers */
  32#define M2P_CONTROL			0x0000
  33#define M2P_CONTROL_STALLINT		BIT(0)
  34#define M2P_CONTROL_NFBINT		BIT(1)
  35#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
  36#define M2P_CONTROL_ENABLE		BIT(4)
  37#define M2P_CONTROL_ICE			BIT(6)
  38
  39#define M2P_INTERRUPT			0x0004
  40#define M2P_INTERRUPT_STALL		BIT(0)
  41#define M2P_INTERRUPT_NFB		BIT(1)
  42#define M2P_INTERRUPT_ERROR		BIT(3)
  43
  44#define M2P_PPALLOC			0x0008
  45#define M2P_STATUS			0x000c
  46
  47#define M2P_MAXCNT0			0x0020
  48#define M2P_BASE0			0x0024
  49#define M2P_MAXCNT1			0x0030
  50#define M2P_BASE1			0x0034
  51
  52#define M2P_STATE_IDLE			0
  53#define M2P_STATE_STALL			1
  54#define M2P_STATE_ON			2
  55#define M2P_STATE_NEXT			3
  56
  57/* M2M registers */
  58#define M2M_CONTROL			0x0000
  59#define M2M_CONTROL_DONEINT		BIT(2)
  60#define M2M_CONTROL_ENABLE		BIT(3)
  61#define M2M_CONTROL_START		BIT(4)
  62#define M2M_CONTROL_DAH			BIT(11)
  63#define M2M_CONTROL_SAH			BIT(12)
  64#define M2M_CONTROL_PW_SHIFT		9
  65#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
  66#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
  67#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
  68#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
  69#define M2M_CONTROL_TM_SHIFT		13
  70#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
  71#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
  72#define M2M_CONTROL_NFBINT		BIT(21)
  73#define M2M_CONTROL_RSS_SHIFT		22
  74#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
  75#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
  76#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
  77#define M2M_CONTROL_NO_HDSK		BIT(24)
  78#define M2M_CONTROL_PWSC_SHIFT		25
  79
  80#define M2M_INTERRUPT			0x0004
  81#define M2M_INTERRUPT_MASK		6
  82
  83#define M2M_STATUS			0x000c
  84#define M2M_STATUS_CTL_SHIFT		1
  85#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
  86#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
  87#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
  88#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
  89#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
  90#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
  91#define M2M_STATUS_BUF_SHIFT		4
  92#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
  93#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
  94#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
  95#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
  96#define M2M_STATUS_DONE			BIT(6)
  97
  98#define M2M_BCR0			0x0010
  99#define M2M_BCR1			0x0014
 100#define M2M_SAR_BASE0			0x0018
 101#define M2M_SAR_BASE1			0x001c
 102#define M2M_DAR_BASE0			0x002c
 103#define M2M_DAR_BASE1			0x0030
 104
 105#define DMA_MAX_CHAN_BYTES		0xffff
 106#define DMA_MAX_CHAN_DESCRIPTORS	32
 107
 108/*
 109 * M2P channels.
 110 *
 111 * Note that these values are also directly used for setting the PPALLOC
 112 * register.
 113 */
 114#define EP93XX_DMA_I2S1			0
 115#define EP93XX_DMA_I2S2			1
 116#define EP93XX_DMA_AAC1			2
 117#define EP93XX_DMA_AAC2			3
 118#define EP93XX_DMA_AAC3			4
 119#define EP93XX_DMA_I2S3			5
 120#define EP93XX_DMA_UART1		6
 121#define EP93XX_DMA_UART2		7
 122#define EP93XX_DMA_UART3		8
 123#define EP93XX_DMA_IRDA			9
 124/* M2M channels */
 125#define EP93XX_DMA_SSP			10
 126#define EP93XX_DMA_IDE			11
 127
 128enum ep93xx_dma_type {
 129	M2P_DMA,
 130	M2M_DMA,
 131};
 132
 133struct ep93xx_dma_engine;
 134static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
 135					 enum dma_transfer_direction dir,
 136					 struct dma_slave_config *config);
 137
 138/**
 139 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 140 * @src_addr: source address of the transaction
 141 * @dst_addr: destination address of the transaction
 142 * @size: size of the transaction (in bytes)
 143 * @complete: this descriptor is completed
 144 * @txd: dmaengine API descriptor
 145 * @tx_list: list of linked descriptors
 146 * @node: link used for putting this into a channel queue
 147 */
 148struct ep93xx_dma_desc {
 149	u32				src_addr;
 150	u32				dst_addr;
 151	size_t				size;
 152	bool				complete;
 153	struct dma_async_tx_descriptor	txd;
 154	struct list_head		tx_list;
 155	struct list_head		node;
 156};
 157
 158struct ep93xx_dma_chan_cfg {
 159	u8				port;
 160	enum dma_transfer_direction	dir;
 161};
 162
 163/**
 164 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 165 * @chan: dmaengine API channel
 166 * @edma: pointer to the engine device
 167 * @regs: memory mapped registers
 168 * @dma_cfg: channel number, direction
 169 * @irq: interrupt number of the channel
 170 * @clk: clock used by this channel
 171 * @tasklet: channel specific tasklet used for callbacks
 172 * @lock: lock protecting the fields following
 173 * @flags: flags for the channel
 174 * @buffer: which buffer to use next (0/1)
 175 * @active: flattened chain of descriptors currently being processed
 176 * @queue: pending descriptors which are handled next
 177 * @free_list: list of free descriptors which can be used
 178 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 179 *                is set via .device_config before slave operation is
 180 *                prepared
 181 * @runtime_ctrl: M2M runtime values for the control register.
 182 * @slave_config: slave configuration
 183 *
 184 * As EP93xx DMA controller doesn't support real chained DMA descriptors we
 185 * will have slightly different scheme here: @active points to a head of
 186 * flattened DMA descriptor chain.
 187 *
 188 * @queue holds pending transactions. These are linked through the first
 189 * descriptor in the chain. When a descriptor is moved to the @active queue,
 190 * the first and chained descriptors are flattened into a single list.
 191 *
 192 */
 193struct ep93xx_dma_chan {
 194	struct dma_chan			chan;
 195	const struct ep93xx_dma_engine	*edma;
 196	void __iomem			*regs;
 197	struct ep93xx_dma_chan_cfg	dma_cfg;
 198	int				irq;
 199	struct clk			*clk;
 200	struct tasklet_struct		tasklet;
 201	/* protects the fields following */
 202	spinlock_t			lock;
 203	unsigned long			flags;
 204/* Channel is configured for cyclic transfers */
 205#define EP93XX_DMA_IS_CYCLIC		0
 206
 207	int				buffer;
 208	struct list_head		active;
 209	struct list_head		queue;
 210	struct list_head		free_list;
 211	u32				runtime_addr;
 212	u32				runtime_ctrl;
 213	struct dma_slave_config		slave_config;
 214};
 215
 216/**
 217 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 218 * @dma_dev: holds the dmaengine device
 219 * @m2m: is this an M2M or M2P device
 220 * @hw_setup: method which sets the channel up for operation
 221 * @hw_synchronize: synchronizes DMA channel termination to current context
 222 * @hw_shutdown: shuts the channel down and flushes whatever is left
 223 * @hw_submit: pushes active descriptor(s) to the hardware
 224 * @hw_interrupt: handle the interrupt
 225 * @num_channels: number of channels for this instance
 226 * @channels: array of channels
 227 *
 228 * There is one instance of this struct for the M2P channels and one for the
 229 * M2M channels. hw_xxx() methods are used to perform operations which are
 230 * different on M2M and M2P channels. These methods are called with channel
 231 * lock held and interrupts disabled so they cannot sleep.
 232 */
 233struct ep93xx_dma_engine {
 234	struct dma_device	dma_dev;
 235	bool			m2m;
 236	int			(*hw_setup)(struct ep93xx_dma_chan *);
 237	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
 238	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
 239	void			(*hw_submit)(struct ep93xx_dma_chan *);
 240	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
 241#define INTERRUPT_UNKNOWN	0
 242#define INTERRUPT_DONE		1
 243#define INTERRUPT_NEXT_BUFFER	2
 244
 245	size_t			num_channels;
 246	struct ep93xx_dma_chan	channels[] __counted_by(num_channels);
 247};
 248
 249struct ep93xx_edma_data {
 250	u32	id;
 251	size_t	num_channels;
 252};
 253
 254static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
 255{
 256	return &edmac->chan.dev->device;
 257}
 258
 259static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
 260{
 261	return container_of(chan, struct ep93xx_dma_chan, chan);
 262}
 263
 264static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
 265{
 266	if (device_is_compatible(chan->device->dev, "cirrus,ep9301-dma-m2p"))
 267		return true;
 268
 269	return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
 270}
 271
 272/*
 273 * ep93xx_dma_chan_direction - returns direction the channel can be used
 274 *
 275 * This function can be used in filter functions to find out whether the
 276 * channel supports given DMA direction. Only M2P channels have such
 277 * limitation, for M2M channels the direction is configurable.
 278 */
 279static inline enum dma_transfer_direction
 280ep93xx_dma_chan_direction(struct dma_chan *chan)
 281{
 282	if (!ep93xx_dma_chan_is_m2p(chan))
 283		return DMA_TRANS_NONE;
 284
 285	/* even channels are for TX, odd for RX */
 286	return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
 287}
 288
 289/**
 290 * ep93xx_dma_set_active - set new active descriptor chain
 291 * @edmac: channel
 292 * @desc: head of the new active descriptor chain
 293 *
 294 * Sets @desc to be the head of the new active descriptor chain. This is the
 295 * chain which is processed next. The active list must be empty before calling
 296 * this function.
 297 *
 298 * Called with @edmac->lock held and interrupts disabled.
 299 */
 300static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
 301				  struct ep93xx_dma_desc *desc)
 302{
 303	BUG_ON(!list_empty(&edmac->active));
 304
 305	list_add_tail(&desc->node, &edmac->active);
 306
 307	/* Flatten the @desc->tx_list chain into @edmac->active list */
 308	while (!list_empty(&desc->tx_list)) {
 309		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
 310			struct ep93xx_dma_desc, node);
 311
 312		/*
 313		 * We copy the callback parameters from the first descriptor
 314		 * to all the chained descriptors. This way we can call the
 315		 * callback without having to find out the first descriptor in
 316		 * the chain. Useful for cyclic transfers.
 317		 */
 318		d->txd.callback = desc->txd.callback;
 319		d->txd.callback_param = desc->txd.callback_param;
 320
 321		list_move_tail(&d->node, &edmac->active);
 322	}
 323}
 324
 325/* Called with @edmac->lock held and interrupts disabled */
 326static struct ep93xx_dma_desc *
 327ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 328{
 329	return list_first_entry_or_null(&edmac->active,
 330					struct ep93xx_dma_desc, node);
 331}
 332
 333/**
 334 * ep93xx_dma_advance_active - advances to the next active descriptor
 335 * @edmac: channel
 336 *
 337 * Function advances the active descriptor to the next one in @edmac->active
 338 * and returns %true if we still have descriptors in the chain to process.
 339 * Otherwise returns %false.
 340 *
 341 * When the channel is in cyclic mode this always returns %true.
 342 *
 343 * Called with @edmac->lock held and interrupts disabled.
 344 */
 345static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
 346{
 347	struct ep93xx_dma_desc *desc;
 348
 349	list_rotate_left(&edmac->active);
 350
 351	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 352		return true;
 353
 354	desc = ep93xx_dma_get_active(edmac);
 355	if (!desc)
 356		return false;
 357
 358	/*
 359	 * If txd.cookie is set it means that we are back in the first
 360	 * descriptor in the chain and hence done with it.
 361	 */
 362	return !desc->txd.cookie;
 363}
 364
 365/*
 366 * M2P DMA implementation
 367 */
 368
 369static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
 370{
 371	writel(control, edmac->regs + M2P_CONTROL);
 372	/*
 373	 * The EP93xx User's Guide states that we must perform a dummy read
 374	 * after writing to the control register.
 375	 */
 376	readl(edmac->regs + M2P_CONTROL);
 377}
 378
 379static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
 380{
 381	u32 control;
 382
 383	writel(edmac->dma_cfg.port & 0xf, edmac->regs + M2P_PPALLOC);
 384
 385	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
 386		| M2P_CONTROL_ENABLE;
 387	m2p_set_control(edmac, control);
 388
 389	edmac->buffer = 0;
 390
 391	return 0;
 392}
 393
 394static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
 395{
 396	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 397}
 398
 399static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 400{
 401	unsigned long flags;
 402	u32 control;
 403
 404	spin_lock_irqsave(&edmac->lock, flags);
 405	control = readl(edmac->regs + M2P_CONTROL);
 406	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 407	m2p_set_control(edmac, control);
 408	spin_unlock_irqrestore(&edmac->lock, flags);
 409
 410	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
 411		schedule();
 412}
 413
 414static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
 415{
 416	m2p_set_control(edmac, 0);
 417
 418	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
 419		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 420}
 421
 422static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
 423{
 424	struct ep93xx_dma_desc *desc;
 425	u32 bus_addr;
 426
 427	desc = ep93xx_dma_get_active(edmac);
 428	if (!desc) {
 429		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
 430		return;
 431	}
 432
 433	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
 434		bus_addr = desc->src_addr;
 435	else
 436		bus_addr = desc->dst_addr;
 437
 438	if (edmac->buffer == 0) {
 439		writel(desc->size, edmac->regs + M2P_MAXCNT0);
 440		writel(bus_addr, edmac->regs + M2P_BASE0);
 441	} else {
 442		writel(desc->size, edmac->regs + M2P_MAXCNT1);
 443		writel(bus_addr, edmac->regs + M2P_BASE1);
 444	}
 445
 446	edmac->buffer ^= 1;
 447}
 448
 449static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
 450{
 451	u32 control = readl(edmac->regs + M2P_CONTROL);
 452
 453	m2p_fill_desc(edmac);
 454	control |= M2P_CONTROL_STALLINT;
 455
 456	if (ep93xx_dma_advance_active(edmac)) {
 457		m2p_fill_desc(edmac);
 458		control |= M2P_CONTROL_NFBINT;
 459	}
 460
 461	m2p_set_control(edmac, control);
 462}
 463
 464static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
 465{
 466	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
 467	u32 control;
 468
 469	if (irq_status & M2P_INTERRUPT_ERROR) {
 470		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
 471
 472		/* Clear the error interrupt */
 473		writel(1, edmac->regs + M2P_INTERRUPT);
 474
 475		/*
 476		 * It seems that there is no easy way of reporting errors back
 477		 * to the client so we just report the error here and continue as
 478		 * usual.
 479		 *
 480		 * Revisit this when there is a mechanism to report back the
 481		 * errors.
 482		 */
 483		dev_err(chan2dev(edmac),
 484			"DMA transfer failed! Details:\n"
 485			"\tcookie	: %d\n"
 486			"\tsrc_addr	: 0x%08x\n"
 487			"\tdst_addr	: 0x%08x\n"
 488			"\tsize		: %zu\n",
 489			desc->txd.cookie, desc->src_addr, desc->dst_addr,
 490			desc->size);
 491	}
 492
 493	/*
 494	 * Even the latest E2 silicon revision sometimes asserts the STALL
 495	 * interrupt instead of NFB. Therefore we treat them equally, based on
 496	 * the amount of data we still have to transfer.
 497	 */
 498	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
 499		return INTERRUPT_UNKNOWN;
 500
 501	if (ep93xx_dma_advance_active(edmac)) {
 502		m2p_fill_desc(edmac);
 503		return INTERRUPT_NEXT_BUFFER;
 504	}
 505
 506	/* Disable interrupts */
 507	control = readl(edmac->regs + M2P_CONTROL);
 508	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 509	m2p_set_control(edmac, control);
 510
 511	return INTERRUPT_DONE;
 512}
 513
 514/*
 515 * M2M DMA implementation
 516 */
 517
 518static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
 519{
 520	u32 control = 0;
 521
 522	if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
 523		/* This is memcpy channel, nothing to configure */
 524		writel(control, edmac->regs + M2M_CONTROL);
 525		return 0;
 526	}
 527
 528	switch (edmac->dma_cfg.port) {
 529	case EP93XX_DMA_SSP:
 530		/*
 531		 * This was found via experimenting - anything less than 5
 532		 * causes the channel to perform only a partial transfer which
 533		 * leads to problems since we don't get DONE interrupt then.
 534		 */
 535		control = (5 << M2M_CONTROL_PWSC_SHIFT);
 536		control |= M2M_CONTROL_NO_HDSK;
 537
 538		if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
 539			control |= M2M_CONTROL_DAH;
 540			control |= M2M_CONTROL_TM_TX;
 541			control |= M2M_CONTROL_RSS_SSPTX;
 542		} else {
 543			control |= M2M_CONTROL_SAH;
 544			control |= M2M_CONTROL_TM_RX;
 545			control |= M2M_CONTROL_RSS_SSPRX;
 546		}
 547		break;
 548
 549	case EP93XX_DMA_IDE:
 550		/*
 551		 * This IDE part is totally untested. Values below are taken
 552		 * from the EP93xx User's Guide and might not be correct.
 553		 */
 554		if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
 555			/* Worst case from the UG */
 556			control = (3 << M2M_CONTROL_PWSC_SHIFT);
 557			control |= M2M_CONTROL_DAH;
 558			control |= M2M_CONTROL_TM_TX;
 559		} else {
 560			control = (2 << M2M_CONTROL_PWSC_SHIFT);
 561			control |= M2M_CONTROL_SAH;
 562			control |= M2M_CONTROL_TM_RX;
 563		}
 564
 565		control |= M2M_CONTROL_NO_HDSK;
 566		control |= M2M_CONTROL_RSS_IDE;
 567		control |= M2M_CONTROL_PW_16;
 568		break;
 569
 570	default:
 571		return -EINVAL;
 572	}
 573
 574	writel(control, edmac->regs + M2M_CONTROL);
 575	return 0;
 576}
 577
 578static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
 579{
 580	/* Just disable the channel */
 581	writel(0, edmac->regs + M2M_CONTROL);
 582}
 583
 584static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
 585{
 586	struct ep93xx_dma_desc *desc;
 587
 588	desc = ep93xx_dma_get_active(edmac);
 589	if (!desc) {
 590		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
 591		return;
 592	}
 593
 594	if (edmac->buffer == 0) {
 595		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
 596		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
 597		writel(desc->size, edmac->regs + M2M_BCR0);
 598	} else {
 599		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
 600		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
 601		writel(desc->size, edmac->regs + M2M_BCR1);
 602	}
 603
 604	edmac->buffer ^= 1;
 605}
 606
 607static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
 608{
 609	u32 control = readl(edmac->regs + M2M_CONTROL);
 610
 611	/*
 612	 * Since we allow clients to configure PW (peripheral width) we always
 613	 * clear the PW bits here and then set them according to what is given
 614	 * in the runtime configuration.
 615	 */
 616	control &= ~M2M_CONTROL_PW_MASK;
 617	control |= edmac->runtime_ctrl;
 618
 619	m2m_fill_desc(edmac);
 620	control |= M2M_CONTROL_DONEINT;
 621
 622	if (ep93xx_dma_advance_active(edmac)) {
 623		m2m_fill_desc(edmac);
 624		control |= M2M_CONTROL_NFBINT;
 625	}
 626
 627	/*
 628	 * Now we can finally enable the channel. For M2M channel this must be
 629	 * done _after_ the BCRx registers are programmed.
 630	 */
 631	control |= M2M_CONTROL_ENABLE;
 632	writel(control, edmac->regs + M2M_CONTROL);
 633
 634	if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
 635		/*
 636		 * For memcpy channels the software trigger must be asserted
 637		 * in order to start the memcpy operation.
 638		 */
 639		control |= M2M_CONTROL_START;
 640		writel(control, edmac->regs + M2M_CONTROL);
 641	}
 642}
 643
 644/*
 645 * According to the EP93xx User's Guide, we should receive the DONE interrupt
 646 * when all M2M DMA controller transactions complete normally. This is not
 647 * always the case - sometimes the EP93xx M2M DMA asserts DONE while the DMA
 648 * channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 649 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 650 * operation). In effect, disabling the channel when only the DONE bit is set
 651 * could stop a currently running DMA transfer. To avoid this, we use the
 652 * Buffer FSM and Control FSM to check the current state of the DMA channel.
 653 */
 654static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
 655{
 656	u32 status = readl(edmac->regs + M2M_STATUS);
 657	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
 658	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
 659	bool done = status & M2M_STATUS_DONE;
 660	bool last_done;
 661	u32 control;
 662	struct ep93xx_dma_desc *desc;
 663
 664	/* Accept only DONE and NFB interrupts */
 665	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
 666		return INTERRUPT_UNKNOWN;
 667
 668	if (done) {
 669		/* Clear the DONE bit */
 670		writel(0, edmac->regs + M2M_INTERRUPT);
 671	}
 672
 673	/*
 674	 * Check whether we are done with descriptors or not. This, together
 675	 * with the DMA channel state, determines the action to take in the interrupt.
 676	 */
 677	desc = ep93xx_dma_get_active(edmac);
 678	last_done = !desc || desc->txd.cookie;
 679
 680	/*
 681	 * Use the M2M DMA Buffer FSM and Control FSM to check the current state
 682	 * of the DMA channel. Using the DONE and NFB bits from the channel status
 683	 * register or bits from the channel interrupt register is not reliable.
 684	 */
 685	if (!last_done &&
 686	    (buf_fsm == M2M_STATUS_BUF_NO ||
 687	     buf_fsm == M2M_STATUS_BUF_ON)) {
 688		/*
 689		 * Two buffers are ready for update when Buffer FSM is in
 690		 * DMA_NO_BUF state. Only one buffer can be prepared without
 691		 * disabling the channel or polling the DONE bit.
 692		 * To simplify things, always prepare only one buffer.
 693		 */
 694		if (ep93xx_dma_advance_active(edmac)) {
 695			m2m_fill_desc(edmac);
 696			if (done && edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
 697				/* Software trigger for memcpy channel */
 698				control = readl(edmac->regs + M2M_CONTROL);
 699				control |= M2M_CONTROL_START;
 700				writel(control, edmac->regs + M2M_CONTROL);
 701			}
 702			return INTERRUPT_NEXT_BUFFER;
 703		} else {
 704			last_done = true;
 705		}
 706	}
 707
 708	/*
 709	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
 710	 * and Control FSM is in DMA_STALL state.
 711	 */
 712	if (last_done &&
 713	    buf_fsm == M2M_STATUS_BUF_NO &&
 714	    ctl_fsm == M2M_STATUS_CTL_STALL) {
 715		/* Disable interrupts and the channel */
 716		control = readl(edmac->regs + M2M_CONTROL);
 717		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
 718			    | M2M_CONTROL_ENABLE);
 719		writel(control, edmac->regs + M2M_CONTROL);
 720		return INTERRUPT_DONE;
 721	}
 722
 723	/*
 724	 * Nothing to do this time.
 725	 */
 726	return INTERRUPT_NEXT_BUFFER;
 727}
 728
 729/*
 730 * DMA engine API implementation
 731 */
 732
 733static struct ep93xx_dma_desc *
 734ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
 735{
 736	struct ep93xx_dma_desc *desc, *_desc;
 737	struct ep93xx_dma_desc *ret = NULL;
 738	unsigned long flags;
 739
 740	spin_lock_irqsave(&edmac->lock, flags);
 741	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
 742		if (async_tx_test_ack(&desc->txd)) {
 743			list_del_init(&desc->node);
 744
 745			/* Re-initialize the descriptor */
 746			desc->src_addr = 0;
 747			desc->dst_addr = 0;
 748			desc->size = 0;
 749			desc->complete = false;
 750			desc->txd.cookie = 0;
 751			desc->txd.callback = NULL;
 752			desc->txd.callback_param = NULL;
 753
 754			ret = desc;
 755			break;
 756		}
 757	}
 758	spin_unlock_irqrestore(&edmac->lock, flags);
 759	return ret;
 760}
 761
 762static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
 763				struct ep93xx_dma_desc *desc)
 764{
 765	if (desc) {
 766		unsigned long flags;
 767
 768		spin_lock_irqsave(&edmac->lock, flags);
 769		list_splice_init(&desc->tx_list, &edmac->free_list);
 770		list_add(&desc->node, &edmac->free_list);
 771		spin_unlock_irqrestore(&edmac->lock, flags);
 772	}
 773}
 774
 775/**
 776 * ep93xx_dma_advance_work - start processing the next pending transaction
 777 * @edmac: channel
 778 *
 779 * If we have pending transactions queued and we are currently idling, this
 780 * function takes the next queued transaction from the @edmac->queue and
 781 * pushes it to the hardware for execution.
 782 */
 783static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
 784{
 785	struct ep93xx_dma_desc *new;
 786	unsigned long flags;
 787
 788	spin_lock_irqsave(&edmac->lock, flags);
 789	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
 790		spin_unlock_irqrestore(&edmac->lock, flags);
 791		return;
 792	}
 793
 794	/* Take the next descriptor from the pending queue */
 795	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
 796	list_del_init(&new->node);
 797
 798	ep93xx_dma_set_active(edmac, new);
 799
 800	/* Push it to the hardware */
 801	edmac->edma->hw_submit(edmac);
 802	spin_unlock_irqrestore(&edmac->lock, flags);
 803}
 804
 805static void ep93xx_dma_tasklet(struct tasklet_struct *t)
 806{
 807	struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
 808	struct ep93xx_dma_desc *desc, *d;
 809	struct dmaengine_desc_callback cb;
 810	LIST_HEAD(list);
 811
 812	memset(&cb, 0, sizeof(cb));
 813	spin_lock_irq(&edmac->lock);
 814	/*
 815	 * If dma_terminate_all() was called before we get to run, the active
 816	 * list has become empty. If that happens we aren't supposed to do
 817	 * anything more than call ep93xx_dma_advance_work().
 818	 */
 819	desc = ep93xx_dma_get_active(edmac);
 820	if (desc) {
 821		if (desc->complete) {
 822			/* mark descriptor complete for non cyclic case only */
 823			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 824				dma_cookie_complete(&desc->txd);
 825			list_splice_init(&edmac->active, &list);
 826		}
 827		dmaengine_desc_get_callback(&desc->txd, &cb);
 828	}
 829	spin_unlock_irq(&edmac->lock);
 830
 831	/* Pick up the next descriptor from the queue */
 832	ep93xx_dma_advance_work(edmac);
 833
 834	/* Now we can release all the chained descriptors */
 835	list_for_each_entry_safe(desc, d, &list, node) {
 836		dma_descriptor_unmap(&desc->txd);
 837		ep93xx_dma_desc_put(edmac, desc);
 838	}
 839
 840	dmaengine_desc_callback_invoke(&cb, NULL);
 841}
 842
 843static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
 844{
 845	struct ep93xx_dma_chan *edmac = dev_id;
 846	struct ep93xx_dma_desc *desc;
 847	irqreturn_t ret = IRQ_HANDLED;
 848
 849	spin_lock(&edmac->lock);
 850
 851	desc = ep93xx_dma_get_active(edmac);
 852	if (!desc) {
 853		dev_warn(chan2dev(edmac),
 854			 "got interrupt while active list is empty\n");
 855		spin_unlock(&edmac->lock);
 856		return IRQ_NONE;
 857	}
 858
 859	switch (edmac->edma->hw_interrupt(edmac)) {
 860	case INTERRUPT_DONE:
 861		desc->complete = true;
 862		tasklet_schedule(&edmac->tasklet);
 863		break;
 864
 865	case INTERRUPT_NEXT_BUFFER:
 866		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
 867			tasklet_schedule(&edmac->tasklet);
 868		break;
 869
 870	default:
 871		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
 872		ret = IRQ_NONE;
 873		break;
 874	}
 875
 876	spin_unlock(&edmac->lock);
 877	return ret;
 878}
 879
 880/**
 881 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 882 * @tx: descriptor to be executed
 883 *
 884 * Function will execute the given descriptor on the hardware or, if the
 885 * hardware is busy, queue the descriptor to be executed later on. Returns a
 886 * cookie which can be used to poll the status of the descriptor.
 887 */
 888static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 889{
 890	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
 891	struct ep93xx_dma_desc *desc;
 892	dma_cookie_t cookie;
 893	unsigned long flags;
 894
 895	spin_lock_irqsave(&edmac->lock, flags);
 896	cookie = dma_cookie_assign(tx);
 897
 898	desc = container_of(tx, struct ep93xx_dma_desc, txd);
 899
 900	/*
 901	 * If nothing is currently processed, we push this descriptor
 902	 * directly to the hardware. Otherwise we put the descriptor
 903	 * to the pending queue.
 904	 */
 905	if (list_empty(&edmac->active)) {
 906		ep93xx_dma_set_active(edmac, desc);
 907		edmac->edma->hw_submit(edmac);
 908	} else {
 909		list_add_tail(&desc->node, &edmac->queue);
 910	}
 911
 912	spin_unlock_irqrestore(&edmac->lock, flags);
 913	return cookie;
 914}
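/*
 * For reference, a client reaches ep93xx_dma_tx_submit() through the generic
 * dmaengine helpers rather than by calling it directly. A minimal,
 * hypothetical sequence ("chan", "buf" and "len" are assumed to be set up by
 * the caller); note that nothing pending is started until
 * dma_async_issue_pending() is called:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */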
 915
 916/**
 917 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 918 * @chan: channel to allocate resources
 919 *
 920 * Function allocates the necessary resources for the given DMA channel and
 921 * returns the number of allocated descriptors for the channel. A negative
 922 * errno is returned in case of failure.
 923 */
 924static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
 925{
 926	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 927	const char *name = dma_chan_name(chan);
 928	int ret, i;
 929
 930	/* Sanity check the channel parameters */
 931	if (!edmac->edma->m2m) {
 932		if (edmac->dma_cfg.port > EP93XX_DMA_IRDA)
 933			return -EINVAL;
 934		if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan))
 935			return -EINVAL;
 936	} else {
 937		if (edmac->dma_cfg.dir != DMA_MEM_TO_MEM) {
 938			switch (edmac->dma_cfg.port) {
 939			case EP93XX_DMA_SSP:
 940			case EP93XX_DMA_IDE:
 941				if (!is_slave_direction(edmac->dma_cfg.dir))
 942					return -EINVAL;
 943				break;
 944			default:
 945				return -EINVAL;
 946			}
 947		}
 948	}
 949
 950	ret = clk_prepare_enable(edmac->clk);
 951	if (ret)
 952		return ret;
 953
 954	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
 955	if (ret)
 956		goto fail_clk_disable;
 957
 958	spin_lock_irq(&edmac->lock);
 959	dma_cookie_init(&edmac->chan);
 960	ret = edmac->edma->hw_setup(edmac);
 961	spin_unlock_irq(&edmac->lock);
 962
 963	if (ret)
 964		goto fail_free_irq;
 965
 966	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
 967		struct ep93xx_dma_desc *desc;
 968
 969		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 970		if (!desc) {
 971			dev_warn(chan2dev(edmac), "not enough descriptors\n");
 972			break;
 973		}
 974
 975		INIT_LIST_HEAD(&desc->tx_list);
 976
 977		dma_async_tx_descriptor_init(&desc->txd, chan);
 978		desc->txd.flags = DMA_CTRL_ACK;
 979		desc->txd.tx_submit = ep93xx_dma_tx_submit;
 980
 981		ep93xx_dma_desc_put(edmac, desc);
 982	}
 983
 984	return i;
 985
 986fail_free_irq:
 987	free_irq(edmac->irq, edmac);
 988fail_clk_disable:
 989	clk_disable_unprepare(edmac->clk);
 990
 991	return ret;
 992}
 993
 994/**
 995 * ep93xx_dma_free_chan_resources - release resources for the channel
 996 * @chan: channel
 997 *
 998 * Function releases all the resources allocated for the given channel.
 999 * The channel must be idle when this is called.
1000 */
1001static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
1002{
1003	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1004	struct ep93xx_dma_desc *desc, *d;
1005	unsigned long flags;
1006	LIST_HEAD(list);
1007
1008	BUG_ON(!list_empty(&edmac->active));
1009	BUG_ON(!list_empty(&edmac->queue));
1010
1011	spin_lock_irqsave(&edmac->lock, flags);
1012	edmac->edma->hw_shutdown(edmac);
1013	edmac->runtime_addr = 0;
1014	edmac->runtime_ctrl = 0;
1015	edmac->buffer = 0;
1016	list_splice_init(&edmac->free_list, &list);
1017	spin_unlock_irqrestore(&edmac->lock, flags);
1018
1019	list_for_each_entry_safe(desc, d, &list, node)
1020		kfree(desc);
1021
1022	clk_disable_unprepare(edmac->clk);
1023	free_irq(edmac->irq, edmac);
1024}
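/*
 * The two callbacks above are invoked by the dmaengine core when a channel
 * is requested and released; a client would normally not call them directly.
 * A hypothetical sketch, assuming the consumer's device tree node carries
 * matching dmas/dma-names properties ("rx" is a made-up name):
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	... prepare, submit and run transfers ...
 *
 *	dma_release_channel(chan);
 */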
1025
1026/**
1027 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
1028 * @chan: channel
1029 * @dest: destination bus address
1030 * @src: source bus address
1031 * @len: size of the transaction
1032 * @flags: flags for the descriptor
1033 *
1034 * Returns a valid DMA descriptor or %NULL in case of failure.
1035 */
1036static struct dma_async_tx_descriptor *
1037ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
1038			   dma_addr_t src, size_t len, unsigned long flags)
1039{
1040	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1041	struct ep93xx_dma_desc *desc, *first;
1042	size_t bytes, offset;
1043
1044	first = NULL;
1045	for (offset = 0; offset < len; offset += bytes) {
1046		desc = ep93xx_dma_desc_get(edmac);
1047		if (!desc) {
1048			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1049			goto fail;
1050		}
1051
1052		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
1053
1054		desc->src_addr = src + offset;
1055		desc->dst_addr = dest + offset;
1056		desc->size = bytes;
1057
1058		if (!first)
1059			first = desc;
1060		else
1061			list_add_tail(&desc->node, &first->tx_list);
1062	}
1063
1064	first->txd.cookie = -EBUSY;
1065	first->txd.flags = flags;
1066
1067	return &first->txd;
1068fail:
1069	ep93xx_dma_desc_put(edmac, first);
1070	return NULL;
1071}
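/*
 * Usage sketch (hypothetical client code, not part of this driver): the M2M
 * controller advertises DMA_MEMCPY, so a copy can be driven through the
 * generic helpers. "chan" is assumed to be a memcpy-capable channel, and
 * "src"/"dst" DMA addresses obtained from dma_map_single() or similar:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *	       DMA_IN_PROGRESS)
 *		cpu_relax();
 */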
1072
1073/**
1074 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
1075 * @chan: channel
1076 * @sgl: list of buffers to transfer
1077 * @sg_len: number of entries in @sgl
1078 * @dir: direction of the DMA transfer
1079 * @flags: flags for the descriptor
1080 * @context: operation context (ignored)
1081 *
1082 * Returns a valid DMA descriptor or %NULL in case of failure.
1083 */
1084static struct dma_async_tx_descriptor *
1085ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1086			 unsigned int sg_len, enum dma_transfer_direction dir,
1087			 unsigned long flags, void *context)
1088{
1089	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1090	struct ep93xx_dma_desc *desc, *first;
1091	struct scatterlist *sg;
1092	int i;
1093
1094	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1095		dev_warn(chan2dev(edmac),
1096			 "channel was configured with different direction\n");
1097		return NULL;
1098	}
1099
1100	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1101		dev_warn(chan2dev(edmac),
1102			 "channel is already used for cyclic transfers\n");
1103		return NULL;
1104	}
1105
1106	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
1107
1108	first = NULL;
1109	for_each_sg(sgl, sg, sg_len, i) {
1110		size_t len = sg_dma_len(sg);
1111
1112		if (len > DMA_MAX_CHAN_BYTES) {
1113			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
1114				 len);
1115			goto fail;
1116		}
1117
1118		desc = ep93xx_dma_desc_get(edmac);
1119		if (!desc) {
1120			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1121			goto fail;
1122		}
1123
1124		if (dir == DMA_MEM_TO_DEV) {
1125			desc->src_addr = sg_dma_address(sg);
1126			desc->dst_addr = edmac->runtime_addr;
1127		} else {
1128			desc->src_addr = edmac->runtime_addr;
1129			desc->dst_addr = sg_dma_address(sg);
1130		}
1131		desc->size = len;
1132
1133		if (!first)
1134			first = desc;
1135		else
1136			list_add_tail(&desc->node, &first->tx_list);
1137	}
1138
1139	first->txd.cookie = -EBUSY;
1140	first->txd.flags = flags;
1141
1142	return &first->txd;
1143
1144fail:
1145	ep93xx_dma_desc_put(edmac, first);
1146	return NULL;
1147}
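/*
 * Usage sketch (hypothetical client code): a peripheral driver maps a
 * scatterlist and hands it to the channel after writing the slave
 * configuration (see ep93xx_dma_slave_config() below). "chan", "sgl",
 * "nents", "my_tx_done" and "my_data" are assumptions of this example:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_tx_done;
 *	txd->callback_param = my_data;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */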
1148
1149/**
1150 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
1151 * @chan: channel
1152 * @dma_addr: DMA mapped address of the buffer
1153 * @buf_len: length of the buffer (in bytes)
1154 * @period_len: length of a single period
1155 * @dir: direction of the operation
1156 * @flags: tx descriptor status flags
1157 *
1158 * Prepares a descriptor for a cyclic DMA operation. This means that once
1159 * the descriptor is submitted, we will be submitting @period_len sized
1160 * buffers and calling the callback once a period has elapsed. The transfer
1161 * terminates only when the client calls dmaengine_terminate_all() for this
1162 * channel.
1163 *
1164 * Returns a valid DMA descriptor or %NULL in case of failure.
1165 */
1166static struct dma_async_tx_descriptor *
1167ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1168			   size_t buf_len, size_t period_len,
1169			   enum dma_transfer_direction dir, unsigned long flags)
1170{
1171	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1172	struct ep93xx_dma_desc *desc, *first;
1173	size_t offset = 0;
1174
1175	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1176		dev_warn(chan2dev(edmac),
1177			 "channel was configured with different direction\n");
1178		return NULL;
1179	}
1180
1181	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1182		dev_warn(chan2dev(edmac),
1183			 "channel is already used for cyclic transfers\n");
1184		return NULL;
1185	}
1186
1187	if (period_len > DMA_MAX_CHAN_BYTES) {
1188		dev_warn(chan2dev(edmac), "too big period length %zu\n",
1189			 period_len);
1190		return NULL;
1191	}
1192
1193	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
1194
1195	/* Split the buffer into period size chunks */
1196	first = NULL;
1197	for (offset = 0; offset < buf_len; offset += period_len) {
1198		desc = ep93xx_dma_desc_get(edmac);
1199		if (!desc) {
1200			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
1201			goto fail;
1202		}
1203
1204		if (dir == DMA_MEM_TO_DEV) {
1205			desc->src_addr = dma_addr + offset;
1206			desc->dst_addr = edmac->runtime_addr;
1207		} else {
1208			desc->src_addr = edmac->runtime_addr;
1209			desc->dst_addr = dma_addr + offset;
1210		}
1211
1212		desc->size = period_len;
1213
1214		if (!first)
1215			first = desc;
1216		else
1217			list_add_tail(&desc->node, &first->tx_list);
1218	}
1219
1220	first->txd.cookie = -EBUSY;
1221
1222	return &first->txd;
1223
1224fail:
1225	ep93xx_dma_desc_put(edmac, first);
1226	return NULL;
1227}
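/*
 * Usage sketch (hypothetical client code): audio-style users typically run a
 * cyclic descriptor over a ring buffer split into periods; the callback then
 * fires once per period until the channel is terminated. "chan", "buf_dma",
 * "buf_len", "period_len", "my_period_elapsed" and "my_data" are assumptions
 * of this example:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_period_elapsed;
 *	txd->callback_param = my_data;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * The ring keeps running until dmaengine_terminate_sync() (or the
 * terminate/synchronize pair shown further below) is called for the channel.
 */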
1228
1229/**
1230 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
1231 * current context.
1232 * @chan: channel
1233 *
1234 * Synchronizes the DMA channel termination to the current context. When this
1235 * function returns it is guaranteed that all transfers for previously issued
1236 * descriptors have stopped and it is safe to free the memory associated
1237 * with them. Furthermore it is guaranteed that all complete callback functions
1238 * for a previously submitted descriptor have finished running and it is safe to
1239 * free resources accessed from within the complete callbacks.
1240 */
1241static void ep93xx_dma_synchronize(struct dma_chan *chan)
1242{
1243	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1244
1245	if (edmac->edma->hw_synchronize)
1246		edmac->edma->hw_synchronize(edmac);
1247}
1248
1249/**
1250 * ep93xx_dma_terminate_all - terminate all transactions
1251 * @chan: channel
1252 *
1253 * Stops all DMA transactions. All descriptors are put back to the
1254 * @edmac->free_list and callbacks are _not_ called.
1255 */
1256static int ep93xx_dma_terminate_all(struct dma_chan *chan)
1257{
1258	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1259	struct ep93xx_dma_desc *desc, *_d;
1260	unsigned long flags;
1261	LIST_HEAD(list);
1262
1263	spin_lock_irqsave(&edmac->lock, flags);
1264	/* First we disable and flush the DMA channel */
1265	edmac->edma->hw_shutdown(edmac);
1266	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
1267	list_splice_init(&edmac->active, &list);
1268	list_splice_init(&edmac->queue, &list);
1269	/*
1270	 * We then re-enable the channel. This way we can continue submitting
1271	 * the descriptors by just calling ->hw_submit() again.
1272	 */
1273	edmac->edma->hw_setup(edmac);
1274	spin_unlock_irqrestore(&edmac->lock, flags);
1275
1276	list_for_each_entry_safe(desc, _d, &list, node)
1277		ep93xx_dma_desc_put(edmac, desc);
1278
1279	return 0;
1280}
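/*
 * A client tearing a channel down would normally combine the two operations
 * above through the generic helpers, so that complete callbacks are
 * guaranteed to have finished before any memory they touch is freed. A
 * minimal sketch:
 *
 *	dmaengine_terminate_async(chan);
 *	dmaengine_synchronize(chan);
 *
 * or, equivalently, dmaengine_terminate_sync(chan).
 */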
1281
1282static int ep93xx_dma_slave_config(struct dma_chan *chan,
1283				   struct dma_slave_config *config)
1284{
1285	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1286
1287	memcpy(&edmac->slave_config, config, sizeof(*config));
1288
1289	return 0;
1290}
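/*
 * Example slave configuration (hypothetical client code): for M2M channels
 * only the device-side address and bus width are consumed, as implemented in
 * ep93xx_dma_slave_config_write() below. "SSP_DR_ADDR" is a made-up register
 * address used purely for illustration:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr	= SSP_DR_ADDR,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *	int ret;
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *	if (ret)
 *		return ret;
 */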
1291
1292static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
1293					 enum dma_transfer_direction dir,
1294					 struct dma_slave_config *config)
1295{
1296	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1297	enum dma_slave_buswidth width;
1298	unsigned long flags;
1299	u32 addr, ctrl;
1300
1301	if (!edmac->edma->m2m)
1302		return -EINVAL;
1303
1304	switch (dir) {
1305	case DMA_DEV_TO_MEM:
1306		width = config->src_addr_width;
1307		addr = config->src_addr;
1308		break;
1309
1310	case DMA_MEM_TO_DEV:
1311		width = config->dst_addr_width;
1312		addr = config->dst_addr;
1313		break;
1314
1315	default:
1316		return -EINVAL;
1317	}
1318
1319	switch (width) {
1320	case DMA_SLAVE_BUSWIDTH_1_BYTE:
1321		ctrl = 0;
1322		break;
1323	case DMA_SLAVE_BUSWIDTH_2_BYTES:
1324		ctrl = M2M_CONTROL_PW_16;
1325		break;
1326	case DMA_SLAVE_BUSWIDTH_4_BYTES:
1327		ctrl = M2M_CONTROL_PW_32;
1328		break;
1329	default:
1330		return -EINVAL;
1331	}
1332
1333	spin_lock_irqsave(&edmac->lock, flags);
1334	edmac->runtime_addr = addr;
1335	edmac->runtime_ctrl = ctrl;
1336	spin_unlock_irqrestore(&edmac->lock, flags);
1337
1338	return 0;
1339}
1340
1341/**
1342 * ep93xx_dma_tx_status - check if a transaction is completed
1343 * @chan: channel
1344 * @cookie: transaction specific cookie
1345 * @state: state of the transaction is stored here if given
1346 *
1347 * This function can be used to query the state of a given transaction.
1348 */
1349static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1350					    dma_cookie_t cookie,
1351					    struct dma_tx_state *state)
1352{
1353	return dma_cookie_status(chan, cookie, state);
1354}
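/*
 * A caller polling for completion typically goes through the dmaengine
 * wrapper rather than this callback directly. A hypothetical sketch, given a
 * cookie returned by dmaengine_submit():
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		return -EINPROGRESS;
 *
 * Note that dma_cookie_status() does not compute a residue, so state.residue
 * is always reported as zero by this driver.
 */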
1355
1356/**
1357 * ep93xx_dma_issue_pending - push pending transactions to the hardware
1358 * @chan: channel
1359 *
1360 * When this function is called, all pending transactions are pushed to the
1361 * hardware and executed.
1362 */
1363static void ep93xx_dma_issue_pending(struct dma_chan *chan)
1364{
1365	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
1366}
1367
1368static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pdev)
1369{
1370	const struct ep93xx_edma_data *data;
1371	struct device *dev = &pdev->dev;
1372	struct ep93xx_dma_engine *edma;
1373	struct dma_device *dma_dev;
1374	char dma_clk_name[5];
1375	int i;
1376
1377	data = device_get_match_data(dev);
1378	if (!data)
1379		return ERR_PTR(dev_err_probe(dev, -ENODEV, "No device match found\n"));
1380
1381	edma = devm_kzalloc(dev, struct_size(edma, channels, data->num_channels),
1382			    GFP_KERNEL);
1383	if (!edma)
1384		return ERR_PTR(-ENOMEM);
1385
1386	edma->m2m = data->id;
1387	edma->num_channels = data->num_channels;
1388	dma_dev = &edma->dma_dev;
1389
1390	INIT_LIST_HEAD(&dma_dev->channels);
1391	for (i = 0; i < edma->num_channels; i++) {
1392		struct ep93xx_dma_chan *edmac = &edma->channels[i];
1393		int len;
1394
1395		edmac->chan.device = dma_dev;
1396		edmac->regs = devm_platform_ioremap_resource(pdev, i);
1397		if (IS_ERR(edmac->regs))
1398			return ERR_CAST(edmac->regs);
1399
1400		edmac->irq = fwnode_irq_get(dev_fwnode(dev), i);
1401		if (edmac->irq < 0)
1402			return ERR_PTR(edmac->irq);
1403
1404		edmac->edma = edma;
1405
1406		if (edma->m2m)
1407			len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i);
1408		else
1409			len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i);
1410		if (len >= sizeof(dma_clk_name))
1411			return ERR_PTR(-ENOBUFS);
1412
1413		edmac->clk = devm_clk_get(dev, dma_clk_name);
1414		if (IS_ERR(edmac->clk)) {
1415			dev_err_probe(dev, PTR_ERR(edmac->clk),
1416				      "no %s clock found\n", dma_clk_name);
1417			return ERR_CAST(edmac->clk);
1418		}
1419
1420		spin_lock_init(&edmac->lock);
1421		INIT_LIST_HEAD(&edmac->active);
1422		INIT_LIST_HEAD(&edmac->queue);
1423		INIT_LIST_HEAD(&edmac->free_list);
1424		tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);
1425
1426		list_add_tail(&edmac->chan.device_node,
1427			      &dma_dev->channels);
1428	}
1429
1430	return edma;
1431}
1432
1433static bool ep93xx_m2p_dma_filter(struct dma_chan *chan, void *filter_param)
1434{
1435	struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
1436	struct ep93xx_dma_chan_cfg *cfg = filter_param;
1437
1438	if (cfg->dir != ep93xx_dma_chan_direction(chan))
1439		return false;
1440
1441	echan->dma_cfg = *cfg;
1442	return true;
1443}
1444
1445static struct dma_chan *ep93xx_m2p_dma_of_xlate(struct of_phandle_args *dma_spec,
1446					    struct of_dma *ofdma)
1447{
1448	struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
1449	dma_cap_mask_t mask = edma->dma_dev.cap_mask;
1450	struct ep93xx_dma_chan_cfg dma_cfg;
1451	u8 port = dma_spec->args[0];
1452	u8 direction = dma_spec->args[1];
1453
1454	if (port > EP93XX_DMA_IRDA)
1455		return NULL;
1456
1457	if (!is_slave_direction(direction))
1458		return NULL;
1459
1460	dma_cfg.port = port;
1461	dma_cfg.dir = direction;
1462
1463	return __dma_request_channel(&mask, ep93xx_m2p_dma_filter, &dma_cfg, ofdma->of_node);
1464}
1465
1466static bool ep93xx_m2m_dma_filter(struct dma_chan *chan, void *filter_param)
1467{
1468	struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
1469	struct ep93xx_dma_chan_cfg *cfg = filter_param;
1470
1471	echan->dma_cfg = *cfg;
1472
1473	return true;
1474}
1475
1476static struct dma_chan *ep93xx_m2m_dma_of_xlate(struct of_phandle_args *dma_spec,
1477					    struct of_dma *ofdma)
1478{
1479	struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
1480	dma_cap_mask_t mask = edma->dma_dev.cap_mask;
1481	struct ep93xx_dma_chan_cfg dma_cfg;
1482	u8 port = dma_spec->args[0];
1483	u8 direction = dma_spec->args[1];
1484
1485	if (!is_slave_direction(direction))
1486		return NULL;
1487
1488	switch (port) {
1489	case EP93XX_DMA_SSP:
1490	case EP93XX_DMA_IDE:
1491		break;
1492	default:
1493		return NULL;
1494	}
1495
1496	dma_cfg.port = port;
1497	dma_cfg.dir = direction;
1498
1499	return __dma_request_channel(&mask, ep93xx_m2m_dma_filter, &dma_cfg, ofdma->of_node);
1500}
1501
1502static int ep93xx_dma_probe(struct platform_device *pdev)
1503{
1504	struct ep93xx_dma_engine *edma;
1505	struct dma_device *dma_dev;
1506	int ret;
1507
1508	edma = ep93xx_dma_of_probe(pdev);
1509	if (IS_ERR(edma))
1510		return PTR_ERR(edma);
1511
1512	dma_dev = &edma->dma_dev;
1513
1514	dma_cap_zero(dma_dev->cap_mask);
1515	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1516	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
1517
1518	dma_dev->dev = &pdev->dev;
1519	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
1520	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1521	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1522	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1523	dma_dev->device_config = ep93xx_dma_slave_config;
1524	dma_dev->device_synchronize = ep93xx_dma_synchronize;
1525	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1526	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1527	dma_dev->device_tx_status = ep93xx_dma_tx_status;
1528
1529	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
1530
1531	if (edma->m2m) {
1532		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1533		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
1534
1535		edma->hw_setup = m2m_hw_setup;
1536		edma->hw_shutdown = m2m_hw_shutdown;
1537		edma->hw_submit = m2m_hw_submit;
1538		edma->hw_interrupt = m2m_hw_interrupt;
1539	} else {
1540		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1541
1542		edma->hw_synchronize = m2p_hw_synchronize;
1543		edma->hw_setup = m2p_hw_setup;
1544		edma->hw_shutdown = m2p_hw_shutdown;
1545		edma->hw_submit = m2p_hw_submit;
1546		edma->hw_interrupt = m2p_hw_interrupt;
1547	}
1548
1549	ret = dma_async_device_register(dma_dev);
1550	if (ret)
1551		return ret;
1552
1553	if (edma->m2m) {
1554		ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2m_dma_of_xlate,
1555						 edma);
1556	} else {
1557		ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2p_dma_of_xlate,
1558						 edma);
1559	}
1560	if (ret)
1561		goto err_dma_unregister;
1562
1563	dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", edma->m2m ? "M" : "P");
1564
1565	return 0;
1566
1567err_dma_unregister:
1568	dma_async_device_unregister(dma_dev);
1569
1570	return ret;
1571}
1572
1573static const struct ep93xx_edma_data edma_m2p = {
1574	.id = M2P_DMA,
1575	.num_channels = 10,
1576};
1577
1578static const struct ep93xx_edma_data edma_m2m = {
1579	.id = M2M_DMA,
1580	.num_channels = 2,
1581};
1582
1583static const struct of_device_id ep93xx_dma_of_ids[] = {
1584	{ .compatible = "cirrus,ep9301-dma-m2p", .data = &edma_m2p },
1585	{ .compatible = "cirrus,ep9301-dma-m2m", .data = &edma_m2m },
1586	{ /* sentinel */ }
1587};
1588MODULE_DEVICE_TABLE(of, ep93xx_dma_of_ids);
1589
1590static const struct platform_device_id ep93xx_dma_driver_ids[] = {
1591	{ "ep93xx-dma-m2p", 0 },
1592	{ "ep93xx-dma-m2m", 1 },
1593	{ },
1594};
1595
1596static struct platform_driver ep93xx_dma_driver = {
1597	.driver		= {
1598		.name	= "ep93xx-dma",
1599		.of_match_table = ep93xx_dma_of_ids,
1600	},
1601	.id_table	= ep93xx_dma_driver_ids,
1602	.probe		= ep93xx_dma_probe,
1603};
1604
1605module_platform_driver(ep93xx_dma_driver);
1606
1607MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
1608MODULE_DESCRIPTION("EP93xx DMA driver");