Linux Audio

v3.1
   1/*
   2 * drivers/dma/imx-sdma.c
   3 *
   4 * This file contains a driver for the Freescale Smart DMA engine
   5 *
   6 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
   7 *
   8 * Based on code from Freescale:
   9 *
  10 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
  11 *
  12 * The code contained herein is licensed under the GNU General Public
  13 * License. You may obtain a copy of the GNU General Public License
  14 * Version 2 or later at the following locations:
  15 *
  16 * http://www.opensource.org/licenses/gpl-license.html
  17 * http://www.gnu.org/copyleft/gpl.html
  18 */
  19
  20#include <linux/init.h>
  21#include <linux/types.h>
  22#include <linux/mm.h>
  23#include <linux/interrupt.h>
  24#include <linux/clk.h>
  25#include <linux/wait.h>
  26#include <linux/sched.h>
  27#include <linux/semaphore.h>
  28#include <linux/spinlock.h>
  29#include <linux/device.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/firmware.h>
  32#include <linux/slab.h>
  33#include <linux/platform_device.h>
  34#include <linux/dmaengine.h>
  35#include <linux/of.h>
  36#include <linux/of_device.h>
  37
  38#include <asm/irq.h>
  39#include <mach/sdma.h>
  40#include <mach/dma.h>
  41#include <mach/hardware.h>
  42
  43/* SDMA registers */
  44#define SDMA_H_C0PTR		0x000
  45#define SDMA_H_INTR		0x004
  46#define SDMA_H_STATSTOP		0x008
  47#define SDMA_H_START		0x00c
  48#define SDMA_H_EVTOVR		0x010
  49#define SDMA_H_DSPOVR		0x014
  50#define SDMA_H_HOSTOVR		0x018
  51#define SDMA_H_EVTPEND		0x01c
  52#define SDMA_H_DSPENBL		0x020
  53#define SDMA_H_RESET		0x024
  54#define SDMA_H_EVTERR		0x028
  55#define SDMA_H_INTRMSK		0x02c
  56#define SDMA_H_PSW		0x030
  57#define SDMA_H_EVTERRDBG	0x034
  58#define SDMA_H_CONFIG		0x038
  59#define SDMA_ONCE_ENB		0x040
  60#define SDMA_ONCE_DATA		0x044
  61#define SDMA_ONCE_INSTR		0x048
  62#define SDMA_ONCE_STAT		0x04c
  63#define SDMA_ONCE_CMD		0x050
  64#define SDMA_EVT_MIRROR		0x054
  65#define SDMA_ILLINSTADDR	0x058
  66#define SDMA_CHN0ADDR		0x05c
  67#define SDMA_ONCE_RTB		0x060
  68#define SDMA_XTRIG_CONF1	0x070
  69#define SDMA_XTRIG_CONF2	0x074
  70#define SDMA_CHNENBL0_IMX35	0x200
  71#define SDMA_CHNENBL0_IMX31	0x080
  72#define SDMA_CHNPRI_0		0x100
  73
  74/*
  75 * Buffer descriptor status values.
  76 */
  77#define BD_DONE  0x01
  78#define BD_WRAP  0x02
  79#define BD_CONT  0x04
  80#define BD_INTR  0x08
  81#define BD_RROR  0x10
  82#define BD_LAST  0x20
  83#define BD_EXTD  0x80
  84
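A mid-list descriptor is typically primed with BD_DONE | BD_EXTD | BD_CONT = 0x85, while the last one trades BD_CONT for BD_LAST | BD_INTR so the engine stops and raises an interrupt; sdma_prep_slave_sg() further down builds exactly these combinations.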
  85/*
  86 * Data Node descriptor status values.
  87 */
  88#define DND_END_OF_FRAME  0x80
  89#define DND_END_OF_XFER   0x40
  90#define DND_DONE          0x20
  91#define DND_UNUSED        0x01
  92
  93/*
  94 * IPCV2 descriptor status values.
  95 */
  96#define BD_IPCV2_END_OF_FRAME  0x40
  97
  98#define IPCV2_MAX_NODES        50
  99/*
 100 * Error bit set in the CCB status field by the SDMA,
 101 * in setbd routine, in case of a transfer error
 102 */
 103#define DATA_ERROR  0x10000000
 104
 105/*
 106 * Buffer descriptor commands.
 107 */
 108#define C0_ADDR             0x01
 109#define C0_LOAD             0x02
 110#define C0_DUMP             0x03
 111#define C0_SETCTX           0x07
 112#define C0_GETCTX           0x03
 113#define C0_SETDM            0x01
 114#define C0_SETPM            0x04
 115#define C0_GETDM            0x02
 116#define C0_GETPM            0x08
 117/*
 118 * Change endianness indicator in the BD command field
 119 */
 120#define CHANGE_ENDIANNESS   0x80
 121
 122/*
 123 * Mode/Count of data node descriptors - IPCv2
 124 */
 125struct sdma_mode_count {
 126	u32 count   : 16; /* size of the buffer pointed by this BD */
 127	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
  128	u32 command :  8; /* command mostly used for channel 0 */
 129};
 130
 131/*
 132 * Buffer descriptor
 133 */
 134struct sdma_buffer_descriptor {
 135	struct sdma_mode_count  mode;
 136	u32 buffer_addr;	/* address of the buffer described */
 137	u32 ext_buffer_addr;	/* extended buffer address */
 138} __attribute__ ((packed));
 139
 140/**
 141 * struct sdma_channel_control - Channel control Block
 142 *
 143 * @current_bd_ptr	current buffer descriptor processed
 144 * @base_bd_ptr		first element of buffer descriptor array
 145 * @unused		padding. The SDMA engine expects an array of 128 byte
 146 *			control blocks
 147 */
 148struct sdma_channel_control {
 149	u32 current_bd_ptr;
 150	u32 base_bd_ptr;
 151	u32 unused[2];
 152} __attribute__ ((packed));
 153
 154/**
 155 * struct sdma_state_registers - SDMA context for a channel
 156 *
 157 * @pc:		program counter
 158 * @t:		test bit: status of arithmetic & test instruction
 159 * @rpc:	return program counter
 160 * @sf:		source fault while loading data
 161 * @spc:	loop start program counter
 162 * @df:		destination fault while storing data
 163 * @epc:	loop end program counter
 164 * @lm:		loop mode
 165 */
 166struct sdma_state_registers {
 167	u32 pc     :14;
 168	u32 unused1: 1;
 169	u32 t      : 1;
 170	u32 rpc    :14;
 171	u32 unused0: 1;
 172	u32 sf     : 1;
 173	u32 spc    :14;
 174	u32 unused2: 1;
 175	u32 df     : 1;
 176	u32 epc    :14;
 177	u32 lm     : 2;
 178} __attribute__ ((packed));
 179
 180/**
 181 * struct sdma_context_data - sdma context specific to a channel
 182 *
 183 * @channel_state:	channel state bits
 184 * @gReg:		general registers
 185 * @mda:		burst dma destination address register
 186 * @msa:		burst dma source address register
 187 * @ms:			burst dma status register
 188 * @md:			burst dma data register
 189 * @pda:		peripheral dma destination address register
 190 * @psa:		peripheral dma source address register
 191 * @ps:			peripheral dma status register
 192 * @pd:			peripheral dma data register
 193 * @ca:			CRC polynomial register
 194 * @cs:			CRC accumulator register
 195 * @dda:		dedicated core destination address register
 196 * @dsa:		dedicated core source address register
 197 * @ds:			dedicated core status register
 198 * @dd:			dedicated core data register
 199 */
 200struct sdma_context_data {
 201	struct sdma_state_registers  channel_state;
 202	u32  gReg[8];
 203	u32  mda;
 204	u32  msa;
 205	u32  ms;
 206	u32  md;
 207	u32  pda;
 208	u32  psa;
 209	u32  ps;
 210	u32  pd;
 211	u32  ca;
 212	u32  cs;
 213	u32  dda;
 214	u32  dsa;
 215	u32  ds;
 216	u32  dd;
 217	u32  scratch0;
 218	u32  scratch1;
 219	u32  scratch2;
 220	u32  scratch3;
 221	u32  scratch4;
 222	u32  scratch5;
 223	u32  scratch6;
 224	u32  scratch7;
 225} __attribute__ ((packed));
 226
 227#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
 228
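Each packed buffer descriptor is 12 bytes (a 32-bit mode word plus two addresses), so with 4 KiB pages NUM_BD evaluates to 4096 / 12 = 341 descriptors per channel; the prep routines below reject requests that exceed this.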
 229struct sdma_engine;
 230
 231/**
 232 * struct sdma_channel - housekeeping for a SDMA channel
 233 *
 234 * @sdma		pointer to the SDMA engine for this channel
 235 * @channel		the channel number, matches dmaengine chan_id + 1
 236 * @direction		transfer type. Needed for setting SDMA script
 237 * @peripheral_type	Peripheral type. Needed for setting SDMA script
 238 * @event_id0		aka dma request line
 239 * @event_id1		for channels that use 2 events
 240 * @word_size		peripheral access size
 241 * @buf_tail		ID of the buffer that was processed
 242 * @done		channel completion
  243 * @num_bd		max NUM_BD. number of descriptors currently being handled
 244 */
 245struct sdma_channel {
 246	struct sdma_engine		*sdma;
 247	unsigned int			channel;
 248	enum dma_data_direction		direction;
 249	enum sdma_peripheral_type	peripheral_type;
 250	unsigned int			event_id0;
 251	unsigned int			event_id1;
 252	enum dma_slave_buswidth		word_size;
 253	unsigned int			buf_tail;
 254	struct completion		done;
 255	unsigned int			num_bd;
 256	struct sdma_buffer_descriptor	*bd;
 257	dma_addr_t			bd_phys;
 258	unsigned int			pc_from_device, pc_to_device;
 259	unsigned long			flags;
 260	dma_addr_t			per_address;
 261	u32				event_mask0, event_mask1;
 262	u32				watermark_level;
 263	u32				shp_addr, per_addr;
 264	struct dma_chan			chan;
 265	spinlock_t			lock;
 266	struct dma_async_tx_descriptor	desc;
 267	dma_cookie_t			last_completed;
 268	enum dma_status			status;
 269};
 270
 271#define IMX_DMA_SG_LOOP		(1 << 0)
 272
 273#define MAX_DMA_CHANNELS 32
 274#define MXC_SDMA_DEFAULT_PRIORITY 1
 275#define MXC_SDMA_MIN_PRIORITY 1
 276#define MXC_SDMA_MAX_PRIORITY 7
 277
 278#define SDMA_FIRMWARE_MAGIC 0x414d4453
 279
 280/**
 281 * struct sdma_firmware_header - Layout of the firmware image
 282 *
 283 * @magic		"SDMA"
 284 * @version_major	increased whenever layout of struct sdma_script_start_addrs
 285 *			changes.
 286 * @version_minor	firmware minor version (for binary compatible changes)
 287 * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
 288 * @num_script_addrs	Number of script addresses in this image
 289 * @ram_code_start	offset of SDMA ram image in this firmware image
 290 * @ram_code_size	size of SDMA ram image
 291 * @script_addrs	Stores the start address of the SDMA scripts
 292 *			(in SDMA memory space)
 293 */
 294struct sdma_firmware_header {
 295	u32	magic;
 296	u32	version_major;
 297	u32	version_minor;
 298	u32	script_addrs_start;
 299	u32	num_script_addrs;
 300	u32	ram_code_start;
 301	u32	ram_code_size;
 302};
 303
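All offsets in the header are relative to the start of the firmware image, so a loader can validate a blob before touching any SDMA register. A minimal sketch of those bounds checks, mirroring what sdma_get_firmware() does further down (the helper name is invented for illustration):

	/* Illustrative sketch, not part of the driver */
	static bool sdma_fw_image_ok(const struct firmware *fw)
	{
		const struct sdma_firmware_header *header;

		if (fw->size < sizeof(*header))
			return false;

		header = (const struct sdma_firmware_header *)fw->data;

		/* 0x414d4453 is "SDMA" read as a little-endian word */
		if (header->magic != SDMA_FIRMWARE_MAGIC)
			return false;

		/* the RAM code must lie entirely inside the image */
		if (header->ram_code_start + header->ram_code_size > fw->size)
			return false;

		return true;
	}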
 304enum sdma_devtype {
 305	IMX31_SDMA,	/* runs on i.mx31 */
 306	IMX35_SDMA,	/* runs on i.mx35 and later */
 307};
 308
 309struct sdma_engine {
 310	struct device			*dev;
 311	struct device_dma_parameters	dma_parms;
 312	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 313	struct sdma_channel_control	*channel_control;
 314	void __iomem			*regs;
 315	enum sdma_devtype		devtype;
 316	unsigned int			num_events;
 317	struct sdma_context_data	*context;
 318	dma_addr_t			context_phys;
 319	struct dma_device		dma_device;
 320	struct clk			*clk;
 321	struct sdma_script_start_addrs	*script_addrs;
 322};
 323
 324static struct platform_device_id sdma_devtypes[] = {
 325	{
 326		.name = "imx31-sdma",
 327		.driver_data = IMX31_SDMA,
 328	}, {
 329		.name = "imx35-sdma",
 330		.driver_data = IMX35_SDMA,
 331	}, {
 332		/* sentinel */
 333	}
 334};
 335MODULE_DEVICE_TABLE(platform, sdma_devtypes);
 336
 337static const struct of_device_id sdma_dt_ids[] = {
 338	{ .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
 339	{ .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
 340	{ /* sentinel */ }
 341};
 342MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 343
 344#define SDMA_H_CONFIG_DSPDMA	(1 << 12) /* indicates if the DSPDMA is used */
 345#define SDMA_H_CONFIG_RTD_PINS	(1 << 11) /* indicates if Real-Time Debug pins are enabled */
 346#define SDMA_H_CONFIG_ACR	(1 << 4)  /* indicates if AHB freq /core freq = 2 or 1 */
 347#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected*/
 348
 349static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 350{
 351	u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
 352						      SDMA_CHNENBL0_IMX35);
 353	return chnenbl0 + event * 4;
 354}
 355
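Each DMA request event owns one 32-bit channel-enable register, and the two supported SoCs differ only in where that register bank starts. On i.MX35, for example, event 5 is gated by the word at 0x200 + 5 * 4 = 0x214, and bit n of that word routes the event to channel n, as sdma_event_enable() below shows.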
 356static int sdma_config_ownership(struct sdma_channel *sdmac,
 357		bool event_override, bool mcu_override, bool dsp_override)
 358{
 359	struct sdma_engine *sdma = sdmac->sdma;
 360	int channel = sdmac->channel;
 361	u32 evt, mcu, dsp;
 362
 363	if (event_override && mcu_override && dsp_override)
 364		return -EINVAL;
 365
 366	evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
 367	mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
 368	dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
 369
 370	if (dsp_override)
 371		dsp &= ~(1 << channel);
 372	else
 373		dsp |= (1 << channel);
 374
 375	if (event_override)
 376		evt &= ~(1 << channel);
 377	else
 378		evt |= (1 << channel);
 379
 380	if (mcu_override)
 381		mcu &= ~(1 << channel);
 382	else
 383		mcu |= (1 << channel);
 384
 385	__raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
 386	__raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
 387	__raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
 388
 389	return 0;
 390}
 391
 392/*
 393 * sdma_run_channel - run a channel and wait till it's done
 394 */
 395static int sdma_run_channel(struct sdma_channel *sdmac)
 396{
 397	struct sdma_engine *sdma = sdmac->sdma;
 398	int channel = sdmac->channel;
 399	int ret;
 400
 401	init_completion(&sdmac->done);
 402
 403	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 404
 405	ret = wait_for_completion_timeout(&sdmac->done, HZ);
 406
 407	return ret ? 0 : -ETIMEDOUT;
 408}
 409
 410static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 411		u32 address)
 412{
 413	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 414	void *buf_virt;
 415	dma_addr_t buf_phys;
 416	int ret;
 417
 418	buf_virt = dma_alloc_coherent(NULL,
 419			size,
 420			&buf_phys, GFP_KERNEL);
 421	if (!buf_virt)
 422		return -ENOMEM;
 423
 424	bd0->mode.command = C0_SETPM;
 425	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 426	bd0->mode.count = size / 2;
 427	bd0->buffer_addr = buf_phys;
 428	bd0->ext_buffer_addr = address;
 429
 430	memcpy(buf_virt, buf, size);
 431
 432	ret = sdma_run_channel(&sdma->channel[0]);
 433
 434	dma_free_coherent(NULL, size, buf_virt, buf_phys);
 435
 436	return ret;
 437}
 438
 439static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 440{
 441	struct sdma_engine *sdma = sdmac->sdma;
 442	int channel = sdmac->channel;
 443	u32 val;
 444	u32 chnenbl = chnenbl_ofs(sdma, event);
 445
 446	val = __raw_readl(sdma->regs + chnenbl);
 447	val |= (1 << channel);
 448	__raw_writel(val, sdma->regs + chnenbl);
 449}
 450
 451static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 452{
 453	struct sdma_engine *sdma = sdmac->sdma;
 454	int channel = sdmac->channel;
 455	u32 chnenbl = chnenbl_ofs(sdma, event);
 456	u32 val;
 457
 458	val = __raw_readl(sdma->regs + chnenbl);
 459	val &= ~(1 << channel);
 460	__raw_writel(val, sdma->regs + chnenbl);
 461}
 462
 463static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
 464{
 465	struct sdma_buffer_descriptor *bd;
 466
 467	/*
 468	 * loop mode. Iterate over descriptors, re-setup them and
 469	 * call callback function.
 470	 */
 471	while (1) {
 472		bd = &sdmac->bd[sdmac->buf_tail];
 473
 474		if (bd->mode.status & BD_DONE)
 475			break;
 476
 477		if (bd->mode.status & BD_RROR)
 478			sdmac->status = DMA_ERROR;
 479		else
 480			sdmac->status = DMA_IN_PROGRESS;
 481
 482		bd->mode.status |= BD_DONE;
 483		sdmac->buf_tail++;
 484		sdmac->buf_tail %= sdmac->num_bd;
 485
 486		if (sdmac->desc.callback)
 487			sdmac->desc.callback(sdmac->desc.callback_param);
 488	}
 489}
 490
 491static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 492{
 493	struct sdma_buffer_descriptor *bd;
 494	int i, error = 0;
 495
 496	/*
 497	 * non loop mode. Iterate over all descriptors, collect
 498	 * errors and call callback function
 499	 */
 500	for (i = 0; i < sdmac->num_bd; i++) {
 501		bd = &sdmac->bd[i];
 502
 503		 if (bd->mode.status & (BD_DONE | BD_RROR))
 504			error = -EIO;
 505	}
 506
 507	if (error)
 508		sdmac->status = DMA_ERROR;
 509	else
 510		sdmac->status = DMA_SUCCESS;
 511
 512	if (sdmac->desc.callback)
 513		sdmac->desc.callback(sdmac->desc.callback_param);
 514	sdmac->last_completed = sdmac->desc.cookie;
 515}
 516
 517static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
 518{
 519	complete(&sdmac->done);
 520
 521	/* not interested in channel 0 interrupts */
 522	if (sdmac->channel == 0)
 523		return;
 524
 525	if (sdmac->flags & IMX_DMA_SG_LOOP)
 526		sdma_handle_channel_loop(sdmac);
 527	else
 528		mxc_sdma_handle_channel_normal(sdmac);
 529}
 530
 531static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 532{
 533	struct sdma_engine *sdma = dev_id;
 534	u32 stat;
 535
 536	stat = __raw_readl(sdma->regs + SDMA_H_INTR);
 537	__raw_writel(stat, sdma->regs + SDMA_H_INTR);
 538
 539	while (stat) {
 540		int channel = fls(stat) - 1;
 541		struct sdma_channel *sdmac = &sdma->channel[channel];
 542
 543		mxc_sdma_handle_channel(sdmac);
 544
 545		stat &= ~(1 << channel);
 546	}
 547
 548	return IRQ_HANDLED;
 549}
 550
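The handler walks the pending mask from the highest set bit downwards: for stat = 0x12 (channels 1 and 4 pending), fls(stat) - 1 yields 4, so channel 4 is serviced and cleared first, then channel 1. Writing stat back to SDMA_H_INTR above acknowledges all pending interrupts in one go (the register is write-one-to-clear).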
 551/*
 552 * sets the pc of SDMA script according to the peripheral type
 553 */
 554static void sdma_get_pc(struct sdma_channel *sdmac,
 555		enum sdma_peripheral_type peripheral_type)
 556{
 557	struct sdma_engine *sdma = sdmac->sdma;
 558	int per_2_emi = 0, emi_2_per = 0;
 559	/*
 560	 * These are needed once we start to support transfers between
 561	 * two peripherals or memory-to-memory transfers
 562	 */
 563	int per_2_per = 0, emi_2_emi = 0;
 564
 565	sdmac->pc_from_device = 0;
 566	sdmac->pc_to_device = 0;
 567
 568	switch (peripheral_type) {
 569	case IMX_DMATYPE_MEMORY:
 570		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 571		break;
 572	case IMX_DMATYPE_DSP:
 573		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
 574		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
 575		break;
 576	case IMX_DMATYPE_FIRI:
 577		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
 578		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
 579		break;
 580	case IMX_DMATYPE_UART:
 581		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
 582		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 583		break;
 584	case IMX_DMATYPE_UART_SP:
 585		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
 586		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 587		break;
 588	case IMX_DMATYPE_ATA:
 589		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
 590		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
 591		break;
 592	case IMX_DMATYPE_CSPI:
 593	case IMX_DMATYPE_EXT:
 594	case IMX_DMATYPE_SSI:
 595		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
 596		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 597		break;
 598	case IMX_DMATYPE_SSI_SP:
 599	case IMX_DMATYPE_MMC:
 600	case IMX_DMATYPE_SDHC:
 601	case IMX_DMATYPE_CSPI_SP:
 602	case IMX_DMATYPE_ESAI:
 603	case IMX_DMATYPE_MSHC_SP:
 604		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
 605		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 606		break;
 607	case IMX_DMATYPE_ASRC:
 608		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
 609		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
 610		per_2_per = sdma->script_addrs->per_2_per_addr;
 611		break;
 612	case IMX_DMATYPE_MSHC:
 613		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
 614		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
 615		break;
 616	case IMX_DMATYPE_CCM:
 617		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
 618		break;
 619	case IMX_DMATYPE_SPDIF:
 620		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
 621		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
 622		break;
 623	case IMX_DMATYPE_IPU_MEMORY:
 624		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
 625		break;
 626	default:
 627		break;
 628	}
 629
 630	sdmac->pc_from_device = per_2_emi;
 631	sdmac->pc_to_device = emi_2_per;
 632}
 633
 634static int sdma_load_context(struct sdma_channel *sdmac)
 635{
 636	struct sdma_engine *sdma = sdmac->sdma;
 637	int channel = sdmac->channel;
 638	int load_address;
 639	struct sdma_context_data *context = sdma->context;
 640	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 641	int ret;
 642
 643	if (sdmac->direction == DMA_FROM_DEVICE) {
 644		load_address = sdmac->pc_from_device;
 645	} else {
 646		load_address = sdmac->pc_to_device;
 647	}
 648
 649	if (load_address < 0)
 650		return load_address;
 651
 652	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
 653	dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
 654	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
 655	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
 656	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
 657	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
 658
 659	memset(context, 0, sizeof(*context));
 660	context->channel_state.pc = load_address;
 661
 662	/* Pass the event masks, peripheral base address and watermark
 663	 * level to the SDMA script via the channel context
 664	 */
 665	context->gReg[0] = sdmac->event_mask1;
 666	context->gReg[1] = sdmac->event_mask0;
 667	context->gReg[2] = sdmac->per_addr;
 668	context->gReg[6] = sdmac->shp_addr;
 669	context->gReg[7] = sdmac->watermark_level;
 670
 671	bd0->mode.command = C0_SETDM;
 672	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 673	bd0->mode.count = sizeof(*context) / 4;
 674	bd0->buffer_addr = sdma->context_phys;
 675	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
 676
 677	ret = sdma_run_channel(&sdma->channel[0]);
 678
 679	return ret;
 680}
 681
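The ext_buffer_addr arithmetic above encodes where contexts live in SDMA internal RAM: struct sdma_context_data is 128 bytes, i.e. 32 SDMA words, and the context area starts at word address 2048, so channel n's context begins at word 2048 + 32 * n. A hedged sketch of the same computation (the helper name is invented):

	/* Illustrative only: SDMA RAM word address of channel n's context */
	static u32 sdma_context_word_address(int channel)
	{
		return 2048 + (sizeof(struct sdma_context_data) / 4) * channel;
	}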
 682static void sdma_disable_channel(struct sdma_channel *sdmac)
 683{
 684	struct sdma_engine *sdma = sdmac->sdma;
 685	int channel = sdmac->channel;
 686
 687	__raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
 688	sdmac->status = DMA_ERROR;
 689}
 690
 691static int sdma_config_channel(struct sdma_channel *sdmac)
 692{
 693	int ret;
 694
 695	sdma_disable_channel(sdmac);
 696
 697	sdmac->event_mask0 = 0;
 698	sdmac->event_mask1 = 0;
 699	sdmac->shp_addr = 0;
 700	sdmac->per_addr = 0;
 701
 702	if (sdmac->event_id0) {
 703		if (sdmac->event_id0 > 32)
 704			return -EINVAL;
 705		sdma_event_enable(sdmac, sdmac->event_id0);
 706	}
 707
 708	switch (sdmac->peripheral_type) {
 709	case IMX_DMATYPE_DSP:
 710		sdma_config_ownership(sdmac, false, true, true);
 711		break;
 712	case IMX_DMATYPE_MEMORY:
 713		sdma_config_ownership(sdmac, false, true, false);
 714		break;
 715	default:
 716		sdma_config_ownership(sdmac, true, true, false);
 717		break;
 718	}
 719
 720	sdma_get_pc(sdmac, sdmac->peripheral_type);
 721
 722	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
 723			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
 724		/* Handle multiple event channels differently */
 725		if (sdmac->event_id1) {
 726			sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
 727			if (sdmac->event_id1 > 31)
 728				sdmac->watermark_level |= 1 << 31;
 729			sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
 730			if (sdmac->event_id0 > 31)
 731				sdmac->watermark_level |= 1 << 30;
 732		} else {
 733			sdmac->event_mask0 = 1 << sdmac->event_id0;
 734			sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
 735		}
 736		/* Watermark Level */
 737		sdmac->watermark_level |= sdmac->watermark_level;
 738		/* Address */
 739		sdmac->shp_addr = sdmac->per_address;
 740	} else {
 741		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
 742	}
 743
 744	ret = sdma_load_context(sdmac);
 745
 746	return ret;
 747}
 748
 749static int sdma_set_channel_priority(struct sdma_channel *sdmac,
 750		unsigned int priority)
 751{
 752	struct sdma_engine *sdma = sdmac->sdma;
 753	int channel = sdmac->channel;
 754
 755	if (priority < MXC_SDMA_MIN_PRIORITY
 756	    || priority > MXC_SDMA_MAX_PRIORITY) {
 757		return -EINVAL;
 758	}
 759
 760	__raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
 761
 762	return 0;
 763}
 764
 765static int sdma_request_channel(struct sdma_channel *sdmac)
 766{
 767	struct sdma_engine *sdma = sdmac->sdma;
 768	int channel = sdmac->channel;
 769	int ret = -EBUSY;
 770
 771	sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
 772	if (!sdmac->bd) {
 773		ret = -ENOMEM;
 774		goto out;
 775	}
 776
 777	memset(sdmac->bd, 0, PAGE_SIZE);
 778
 779	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
 780	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 781
 782	clk_enable(sdma->clk);
 783
 784	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
 785
 786	init_completion(&sdmac->done);
 787
 788	sdmac->buf_tail = 0;
 789
 790	return 0;
 791out:
 792
 793	return ret;
 794}
 795
 796static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 797{
 798	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 799}
 800
 801static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
 802{
 803	dma_cookie_t cookie = sdmac->chan.cookie;
 804
 805	if (++cookie < 0)
 806		cookie = 1;
 807
 808	sdmac->chan.cookie = cookie;
 809	sdmac->desc.cookie = cookie;
 810
 811	return cookie;
 812}
 813
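dma_cookie_t is a signed 32-bit value in which positive numbers identify transactions and negative ones carry error codes, so when the increment wraps past INT_MAX the < 0 test restarts the sequence at 1.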
 814static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 815{
 816	return container_of(chan, struct sdma_channel, chan);
 817}
 818
 819static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 820{
 821	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
 822	struct sdma_engine *sdma = sdmac->sdma;
 823	dma_cookie_t cookie;
 824
 825	spin_lock_irq(&sdmac->lock);
 826
 827	cookie = sdma_assign_cookie(sdmac);
 828
 829	sdma_enable_channel(sdma, sdmac->channel);
 830
 831	spin_unlock_irq(&sdmac->lock);
 832
 833	return cookie;
 834}
 835
 836static int sdma_alloc_chan_resources(struct dma_chan *chan)
 837{
 838	struct sdma_channel *sdmac = to_sdma_chan(chan);
 839	struct imx_dma_data *data = chan->private;
 840	int prio, ret;
 841
 842	if (!data)
 843		return -EINVAL;
 844
 845	switch (data->priority) {
 846	case DMA_PRIO_HIGH:
 847		prio = 3;
 848		break;
 849	case DMA_PRIO_MEDIUM:
 850		prio = 2;
 851		break;
 852	case DMA_PRIO_LOW:
 853	default:
 854		prio = 1;
 855		break;
 856	}
 857
 858	sdmac->peripheral_type = data->peripheral_type;
 859	sdmac->event_id0 = data->dma_request;
 860	ret = sdma_set_channel_priority(sdmac, prio);
 861	if (ret)
 862		return ret;
 863
 864	ret = sdma_request_channel(sdmac);
 865	if (ret)
 866		return ret;
 867
 868	dma_async_tx_descriptor_init(&sdmac->desc, chan);
 869	sdmac->desc.tx_submit = sdma_tx_submit;
 870	/* txd.flags will be overwritten in prep funcs */
 871	sdmac->desc.flags = DMA_CTRL_ACK;
 872
 873	return 0;
 874}
 875
 876static void sdma_free_chan_resources(struct dma_chan *chan)
 877{
 878	struct sdma_channel *sdmac = to_sdma_chan(chan);
 879	struct sdma_engine *sdma = sdmac->sdma;
 880
 881	sdma_disable_channel(sdmac);
 882
 883	if (sdmac->event_id0)
 884		sdma_event_disable(sdmac, sdmac->event_id0);
 885	if (sdmac->event_id1)
 886		sdma_event_disable(sdmac, sdmac->event_id1);
 887
 888	sdmac->event_id0 = 0;
 889	sdmac->event_id1 = 0;
 890
 891	sdma_set_channel_priority(sdmac, 0);
 892
 893	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
 894
 895	clk_disable(sdma->clk);
 896}
 897
 898static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 899		struct dma_chan *chan, struct scatterlist *sgl,
 900		unsigned int sg_len, enum dma_data_direction direction,
 901		unsigned long flags)
 902{
 903	struct sdma_channel *sdmac = to_sdma_chan(chan);
 904	struct sdma_engine *sdma = sdmac->sdma;
 905	int ret, i, count;
 906	int channel = sdmac->channel;
 907	struct scatterlist *sg;
 908
 909	if (sdmac->status == DMA_IN_PROGRESS)
 910		return NULL;
 911	sdmac->status = DMA_IN_PROGRESS;
 912
 913	sdmac->flags = 0;
 914
 915	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
 916			sg_len, channel);
 917
 918	sdmac->direction = direction;
 919	ret = sdma_load_context(sdmac);
 920	if (ret)
 921		goto err_out;
 922
 923	if (sg_len > NUM_BD) {
 924		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
 925				channel, sg_len, NUM_BD);
 926		ret = -EINVAL;
 927		goto err_out;
 928	}
 929
 930	for_each_sg(sgl, sg, sg_len, i) {
 931		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
 932		int param;
 933
 934		bd->buffer_addr = sg->dma_address;
 935
 936		count = sg->length;
 937
 938		if (count > 0xffff) {
 939			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
 940					channel, count, 0xffff);
 941			ret = -EINVAL;
 942			goto err_out;
 943		}
 944
 945		bd->mode.count = count;
 946
 947		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
 948			ret =  -EINVAL;
 949			goto err_out;
 950		}
 951
 952		switch (sdmac->word_size) {
 953		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 954			bd->mode.command = 0;
 955			if (count & 3 || sg->dma_address & 3)
 956				return NULL;
 957			break;
 958		case DMA_SLAVE_BUSWIDTH_2_BYTES:
 959			bd->mode.command = 2;
 960			if (count & 1 || sg->dma_address & 1)
 961				return NULL;
 962			break;
 963		case DMA_SLAVE_BUSWIDTH_1_BYTE:
 964			bd->mode.command = 1;
 965			break;
 966		default:
 967			return NULL;
 968		}
 969
 970		param = BD_DONE | BD_EXTD | BD_CONT;
 971
 972		if (i + 1 == sg_len) {
 973			param |= BD_INTR;
 974			param |= BD_LAST;
 975			param &= ~BD_CONT;
 976		}
 977
 978		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
 979				i, count, sg->dma_address,
 980				param & BD_WRAP ? "wrap" : "",
 981				param & BD_INTR ? " intr" : "");
 982
 983		bd->mode.status = param;
 984	}
 985
 986	sdmac->num_bd = sg_len;
 987	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 988
 989	return &sdmac->desc;
 990err_out:
 991	sdmac->status = DMA_ERROR;
 992	return NULL;
 993}
 994
 995static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 996		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 997		size_t period_len, enum dma_data_direction direction)
 998{
 999	struct sdma_channel *sdmac = to_sdma_chan(chan);
1000	struct sdma_engine *sdma = sdmac->sdma;
1001	int num_periods = buf_len / period_len;
1002	int channel = sdmac->channel;
1003	int ret, i = 0, buf = 0;
1004
1005	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1006
1007	if (sdmac->status == DMA_IN_PROGRESS)
1008		return NULL;
1009
1010	sdmac->status = DMA_IN_PROGRESS;
1011
1012	sdmac->flags |= IMX_DMA_SG_LOOP;
1013	sdmac->direction = direction;
1014	ret = sdma_load_context(sdmac);
1015	if (ret)
1016		goto err_out;
1017
1018	if (num_periods > NUM_BD) {
1019		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1020				channel, num_periods, NUM_BD);
1021		goto err_out;
1022	}
1023
1024	if (period_len > 0xffff) {
1025		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
1026				channel, period_len, 0xffff);
1027		goto err_out;
1028	}
1029
1030	while (buf < buf_len) {
1031		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1032		int param;
1033
1034		bd->buffer_addr = dma_addr;
1035
1036		bd->mode.count = period_len;
1037
1038		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1039			goto err_out;
1040		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1041			bd->mode.command = 0;
1042		else
1043			bd->mode.command = sdmac->word_size;
1044
1045		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1046		if (i + 1 == num_periods)
1047			param |= BD_WRAP;
1048
1049		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1050				i, period_len, dma_addr,
1051				param & BD_WRAP ? "wrap" : "",
1052				param & BD_INTR ? " intr" : "");
1053
1054		bd->mode.status = param;
1055
1056		dma_addr += period_len;
1057		buf += period_len;
1058
1059		i++;
1060	}
1061
1062	sdmac->num_bd = num_periods;
1063	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1064
1065	return &sdmac->desc;
1066err_out:
1067	sdmac->status = DMA_ERROR;
1068	return NULL;
1069}
1070
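Every period gets its own descriptor with BD_INTR set, and the last one adds BD_WRAP, closing the ring so the engine cycles through the buffer with no CPU intervention while the callback fires once per period. A hedged client-side sketch of requesting such a ring through the dmaengine API of this kernel generation (all my_* names are placeholders, not real symbols):

	/* Illustrative sketch only: my_period_elapsed is a placeholder callback */
	static int my_start_audio_ring(struct dma_chan *chan, dma_addr_t my_buf_phys,
			size_t my_buf_len, size_t my_period_len, void *my_pcm_data)
	{
		struct dma_async_tx_descriptor *desc;

		desc = chan->device->device_prep_dma_cyclic(chan, my_buf_phys,
				my_buf_len, my_period_len, DMA_TO_DEVICE);
		if (!desc)
			return -EINVAL;

		desc->callback = my_period_elapsed;	/* runs once per period */
		desc->callback_param = my_pcm_data;

		dmaengine_submit(desc);			/* ends up in sdma_tx_submit() */
		dma_async_issue_pending(chan);		/* a no-op here, see below */

		return 0;
	}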
1071static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1072		unsigned long arg)
1073{
1074	struct sdma_channel *sdmac = to_sdma_chan(chan);
1075	struct dma_slave_config *dmaengine_cfg = (void *)arg;
1076
1077	switch (cmd) {
1078	case DMA_TERMINATE_ALL:
1079		sdma_disable_channel(sdmac);
1080		return 0;
1081	case DMA_SLAVE_CONFIG:
1082		if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
1083			sdmac->per_address = dmaengine_cfg->src_addr;
1084			sdmac->watermark_level = dmaengine_cfg->src_maxburst;
1085			sdmac->word_size = dmaengine_cfg->src_addr_width;
1086		} else {
1087			sdmac->per_address = dmaengine_cfg->dst_addr;
1088			sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
1089			sdmac->word_size = dmaengine_cfg->dst_addr_width;
1090		}
1091		return sdma_config_channel(sdmac);
1092	default:
1093		return -ENOSYS;
1094	}
1095
1096	return -EINVAL;
1097}
1098
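Before preparing transfers a client hands the peripheral geometry to the driver through DMA_SLAVE_CONFIG; the values end up in per_address, watermark_level and word_size above. A hedged sketch of a client filling that in (the FIFO address constant and burst size are made-up values):

	/* Illustrative sketch only: MY_FIFO_PHYS_ADDR is a made-up constant */
	static int my_setup_slave(struct dma_chan *chan)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_TO_DEVICE,
			.dst_addr	= MY_FIFO_PHYS_ADDR,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
			.dst_maxburst	= 4,	/* becomes watermark_level */
		};

		return dmaengine_slave_config(chan, &cfg);	/* -> sdma_control() */
	}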
1099static enum dma_status sdma_tx_status(struct dma_chan *chan,
1100					    dma_cookie_t cookie,
1101					    struct dma_tx_state *txstate)
1102{
1103	struct sdma_channel *sdmac = to_sdma_chan(chan);
1104	dma_cookie_t last_used;
1105
1106	last_used = chan->cookie;
1107
1108	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
1109
1110	return sdmac->status;
1111}
1112
1113static void sdma_issue_pending(struct dma_chan *chan)
1114{
1115	/*
1116	 * Nothing to do. We only have a single descriptor
1117	 */
1118}
1119
1120#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
1121
1122static void sdma_add_scripts(struct sdma_engine *sdma,
1123		const struct sdma_script_start_addrs *addr)
1124{
1125	s32 *addr_arr = (u32 *)addr;
1126	s32 *saddr_arr = (u32 *)sdma->script_addrs;
1127	int i;
1128
1129	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1130		if (addr_arr[i] > 0)
1131			saddr_arr[i] = addr_arr[i];
1132}
1133
1134static int __init sdma_get_firmware(struct sdma_engine *sdma,
1135		const char *fw_name)
1136{
1137	const struct firmware *fw;
1138	const struct sdma_firmware_header *header;
1139	int ret;
1140	const struct sdma_script_start_addrs *addr;
1141	unsigned short *ram_code;
1142
1143	ret = request_firmware(&fw, fw_name, sdma->dev);
1144	if (ret)
1145		return ret;
1146
1147	if (fw->size < sizeof(*header))
1148		goto err_firmware;
1149
1150	header = (struct sdma_firmware_header *)fw->data;
1151
1152	if (header->magic != SDMA_FIRMWARE_MAGIC)
1153		goto err_firmware;
1154	if (header->ram_code_start + header->ram_code_size > fw->size)
1155		goto err_firmware;
1156
1157	addr = (void *)header + header->script_addrs_start;
1158	ram_code = (void *)header + header->ram_code_start;
1159
1160	clk_enable(sdma->clk);
1161	/* download the RAM image for SDMA */
1162	sdma_load_script(sdma, ram_code,
1163			header->ram_code_size,
1164			addr->ram_code_start_addr);
1165	clk_disable(sdma->clk);
1166
1167	sdma_add_scripts(sdma, addr);
1168
1169	dev_info(sdma->dev, "loaded firmware %d.%d\n",
1170			header->version_major,
1171			header->version_minor);
1172
1173err_firmware:
1174	release_firmware(fw);
1175
1176	return ret;
1177}
1178
1179static int __init sdma_init(struct sdma_engine *sdma)
1180{
1181	int i, ret;
1182	dma_addr_t ccb_phys;
1183
1184	switch (sdma->devtype) {
1185	case IMX31_SDMA:
1186		sdma->num_events = 32;
1187		break;
1188	case IMX35_SDMA:
1189		sdma->num_events = 48;
1190		break;
1191	default:
1192		dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
1193			sdma->devtype);
1194		return -ENODEV;
1195	}
1196
1197	clk_enable(sdma->clk);
1198
1199	/* Be sure SDMA has not started yet */
1200	__raw_writel(0, sdma->regs + SDMA_H_C0PTR);
1201
1202	sdma->channel_control = dma_alloc_coherent(NULL,
1203			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1204			sizeof(struct sdma_context_data),
1205			&ccb_phys, GFP_KERNEL);
1206
1207	if (!sdma->channel_control) {
1208		ret = -ENOMEM;
1209		goto err_dma_alloc;
1210	}
1211
1212	sdma->context = (void *)sdma->channel_control +
1213		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1214	sdma->context_phys = ccb_phys +
1215		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1216
1217	/* Zero-out the CCB structures array just allocated */
1218	memset(sdma->channel_control, 0,
1219			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1220
1221	/* disable all channels */
1222	for (i = 0; i < sdma->num_events; i++)
1223		__raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
1224
1225	/* All channels have priority 0 */
1226	for (i = 0; i < MAX_DMA_CHANNELS; i++)
1227		__raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1228
1229	ret = sdma_request_channel(&sdma->channel[0]);
1230	if (ret)
1231		goto err_dma_alloc;
1232
1233	sdma_config_ownership(&sdma->channel[0], false, true, false);
1234
1235	/* Set Command Channel (Channel Zero) */
1236	__raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
1237
1238	/* Set bits of CONFIG register but with static context switching */
1239	/* FIXME: Check whether to set ACR bit depending on clock ratios */
1240	__raw_writel(0, sdma->regs + SDMA_H_CONFIG);
1241
1242	__raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1243
1244	/* Set bits of CONFIG register with given context switching mode */
1245	__raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1246
1247	/* Initializes channel's priorities */
1248	sdma_set_channel_priority(&sdma->channel[0], 7);
1249
1250	clk_disable(sdma->clk);
1251
1252	return 0;
1253
1254err_dma_alloc:
1255	clk_disable(sdma->clk);
1256	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1257	return ret;
1258}
1259
1260static int __init sdma_probe(struct platform_device *pdev)
1261{
1262	const struct of_device_id *of_id =
1263			of_match_device(sdma_dt_ids, &pdev->dev);
1264	struct device_node *np = pdev->dev.of_node;
1265	const char *fw_name;
1266	int ret;
1267	int irq;
1268	struct resource *iores;
1269	struct sdma_platform_data *pdata = pdev->dev.platform_data;
1270	int i;
1271	struct sdma_engine *sdma;
1272
1273	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1274	if (!sdma)
1275		return -ENOMEM;
1276
1277	sdma->dev = &pdev->dev;
1278
1279	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1280	irq = platform_get_irq(pdev, 0);
1281	if (!iores || irq < 0) {
1282		ret = -EINVAL;
1283		goto err_irq;
1284	}
1285
1286	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
1287		ret = -EBUSY;
1288		goto err_request_region;
1289	}
1290
1291	sdma->clk = clk_get(&pdev->dev, NULL);
1292	if (IS_ERR(sdma->clk)) {
1293		ret = PTR_ERR(sdma->clk);
1294		goto err_clk;
1295	}
1296
1297	sdma->regs = ioremap(iores->start, resource_size(iores));
1298	if (!sdma->regs) {
1299		ret = -ENOMEM;
1300		goto err_ioremap;
1301	}
1302
1303	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
1304	if (ret)
1305		goto err_request_irq;
1306
1307	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1308	if (!sdma->script_addrs) {
1309		ret = -ENOMEM;
1310		goto err_alloc;
1311	}
1312
1313	if (of_id)
1314		pdev->id_entry = of_id->data;
1315	sdma->devtype = pdev->id_entry->driver_data;
1316
1317	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1318	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1319
1320	INIT_LIST_HEAD(&sdma->dma_device.channels);
1321	/* Initialize channel parameters */
1322	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1323		struct sdma_channel *sdmac = &sdma->channel[i];
1324
1325		sdmac->sdma = sdma;
1326		spin_lock_init(&sdmac->lock);
1327
1328		sdmac->chan.device = &sdma->dma_device;
1329		sdmac->channel = i;
1330
1331		/*
1332		 * Add the channel to the DMAC list. Do not add channel 0 though
1333		 * because we need it internally in the SDMA driver. This also means
1334		 * that channel 0 in dmaengine counting matches sdma channel 1.
1335		 */
1336		if (i)
1337			list_add_tail(&sdmac->chan.device_node,
1338					&sdma->dma_device.channels);
1339	}
1340
1341	ret = sdma_init(sdma);
1342	if (ret)
1343		goto err_init;
1344
1345	if (pdata && pdata->script_addrs)
1346		sdma_add_scripts(sdma, pdata->script_addrs);
1347
1348	if (pdata) {
1349		sdma_get_firmware(sdma, pdata->fw_name);
1350	} else {
1351		/*
1352		 * Because the device tree does not encode the ROM script
1353		 * address, the RAM script in the firmware is mandatory for a
1354		 * device tree probe; without it the probe fails.
1355		 */
1356		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
1357					      &fw_name);
1358		if (ret) {
1359			dev_err(&pdev->dev, "failed to get firmware name\n");
1360			goto err_init;
1361		}
1362
1363		ret = sdma_get_firmware(sdma, fw_name);
1364		if (ret) {
1365			dev_err(&pdev->dev, "failed to get firmware\n");
1366			goto err_init;
1367		}
1368	}
1369
1370	sdma->dma_device.dev = &pdev->dev;
1371
1372	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
1373	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
1374	sdma->dma_device.device_tx_status = sdma_tx_status;
1375	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1376	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1377	sdma->dma_device.device_control = sdma_control;
1378	sdma->dma_device.device_issue_pending = sdma_issue_pending;
1379	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1380	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1381
1382	ret = dma_async_device_register(&sdma->dma_device);
1383	if (ret) {
1384		dev_err(&pdev->dev, "unable to register\n");
1385		goto err_init;
1386	}
1387
1388	dev_info(sdma->dev, "initialized\n");
1389
1390	return 0;
1391
1392err_init:
1393	kfree(sdma->script_addrs);
1394err_alloc:
1395	free_irq(irq, sdma);
1396err_request_irq:
1397	iounmap(sdma->regs);
1398err_ioremap:
1399	clk_put(sdma->clk);
1400err_clk:
1401	release_mem_region(iores->start, resource_size(iores));
1402err_request_region:
1403err_irq:
1404	kfree(sdma);
1405	return ret;
1406}
1407
1408static int __exit sdma_remove(struct platform_device *pdev)
1409{
1410	return -EBUSY;
1411}
1412
1413static struct platform_driver sdma_driver = {
1414	.driver		= {
1415		.name	= "imx-sdma",
1416		.of_match_table = sdma_dt_ids,
1417	},
1418	.id_table	= sdma_devtypes,
1419	.remove		= __exit_p(sdma_remove),
1420};
1421
1422static int __init sdma_module_init(void)
1423{
1424	return platform_driver_probe(&sdma_driver, sdma_probe);
1425}
1426module_init(sdma_module_init);
1427
1428MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1429MODULE_DESCRIPTION("i.MX SDMA driver");
1430MODULE_LICENSE("GPL");
v4.6
   1/*
   2 * drivers/dma/imx-sdma.c
   3 *
   4 * This file contains a driver for the Freescale Smart DMA engine
   5 *
   6 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
   7 *
   8 * Based on code from Freescale:
   9 *
  10 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
  11 *
  12 * The code contained herein is licensed under the GNU General Public
  13 * License. You may obtain a copy of the GNU General Public License
  14 * Version 2 or later at the following locations:
  15 *
  16 * http://www.opensource.org/licenses/gpl-license.html
  17 * http://www.gnu.org/copyleft/gpl.html
  18 */
  19
  20#include <linux/init.h>
  21#include <linux/module.h>
  22#include <linux/types.h>
  23#include <linux/bitops.h>
  24#include <linux/mm.h>
  25#include <linux/interrupt.h>
  26#include <linux/clk.h>
  27#include <linux/delay.h>
  28#include <linux/sched.h>
  29#include <linux/semaphore.h>
  30#include <linux/spinlock.h>
  31#include <linux/device.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/firmware.h>
  34#include <linux/slab.h>
  35#include <linux/platform_device.h>
  36#include <linux/dmaengine.h>
  37#include <linux/of.h>
  38#include <linux/of_address.h>
  39#include <linux/of_device.h>
  40#include <linux/of_dma.h>
  41
  42#include <asm/irq.h>
  43#include <linux/platform_data/dma-imx-sdma.h>
  44#include <linux/platform_data/dma-imx.h>
  45#include <linux/regmap.h>
  46#include <linux/mfd/syscon.h>
  47#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  48
  49#include "dmaengine.h"
  50
  51/* SDMA registers */
  52#define SDMA_H_C0PTR		0x000
  53#define SDMA_H_INTR		0x004
  54#define SDMA_H_STATSTOP		0x008
  55#define SDMA_H_START		0x00c
  56#define SDMA_H_EVTOVR		0x010
  57#define SDMA_H_DSPOVR		0x014
  58#define SDMA_H_HOSTOVR		0x018
  59#define SDMA_H_EVTPEND		0x01c
  60#define SDMA_H_DSPENBL		0x020
  61#define SDMA_H_RESET		0x024
  62#define SDMA_H_EVTERR		0x028
  63#define SDMA_H_INTRMSK		0x02c
  64#define SDMA_H_PSW		0x030
  65#define SDMA_H_EVTERRDBG	0x034
  66#define SDMA_H_CONFIG		0x038
  67#define SDMA_ONCE_ENB		0x040
  68#define SDMA_ONCE_DATA		0x044
  69#define SDMA_ONCE_INSTR		0x048
  70#define SDMA_ONCE_STAT		0x04c
  71#define SDMA_ONCE_CMD		0x050
  72#define SDMA_EVT_MIRROR		0x054
  73#define SDMA_ILLINSTADDR	0x058
  74#define SDMA_CHN0ADDR		0x05c
  75#define SDMA_ONCE_RTB		0x060
  76#define SDMA_XTRIG_CONF1	0x070
  77#define SDMA_XTRIG_CONF2	0x074
  78#define SDMA_CHNENBL0_IMX35	0x200
  79#define SDMA_CHNENBL0_IMX31	0x080
  80#define SDMA_CHNPRI_0		0x100
  81
  82/*
  83 * Buffer descriptor status values.
  84 */
  85#define BD_DONE  0x01
  86#define BD_WRAP  0x02
  87#define BD_CONT  0x04
  88#define BD_INTR  0x08
  89#define BD_RROR  0x10
  90#define BD_LAST  0x20
  91#define BD_EXTD  0x80
  92
  93/*
  94 * Data Node descriptor status values.
  95 */
  96#define DND_END_OF_FRAME  0x80
  97#define DND_END_OF_XFER   0x40
  98#define DND_DONE          0x20
  99#define DND_UNUSED        0x01
 100
 101/*
 102 * IPCV2 descriptor status values.
 103 */
 104#define BD_IPCV2_END_OF_FRAME  0x40
 105
 106#define IPCV2_MAX_NODES        50
 107/*
 108 * Error bit set in the CCB status field by the SDMA,
 109 * in setbd routine, in case of a transfer error
 110 */
 111#define DATA_ERROR  0x10000000
 112
 113/*
 114 * Buffer descriptor commands.
 115 */
 116#define C0_ADDR             0x01
 117#define C0_LOAD             0x02
 118#define C0_DUMP             0x03
 119#define C0_SETCTX           0x07
 120#define C0_GETCTX           0x03
 121#define C0_SETDM            0x01
 122#define C0_SETPM            0x04
 123#define C0_GETDM            0x02
 124#define C0_GETPM            0x08
 125/*
 126 * Change endianness indicator in the BD command field
 127 */
 128#define CHANGE_ENDIANNESS   0x80
 129
 130/*
 131 *  p_2_p watermark_level description
 132 *	Bits		Name			Description
 133 *	0-7		Lower WML		Lower watermark level
 134 *	8		PS			1: Pad Swallowing
 135 *						0: No Pad Swallowing
 136 *	9		PA			1: Pad Adding
 137 *						0: No Pad Adding
 138 *	10		SPDIF			If this bit is set both source
 139 *						and destination are on SPBA
 140 *	11		Source Bit(SP)		1: Source on SPBA
 141 *						0: Source on AIPS
 142 *	12		Destination Bit(DP)	1: Destination on SPBA
 143 *						0: Destination on AIPS
 144 *	13-15		---------		MUST BE 0
 145 *	16-23		Higher WML		HWML
 146 *	24-27		N			Total number of samples after
 147 *						which Pad adding/Swallowing
 148 *						must be done. It must be odd.
 149 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 150 *						LWML event mask
 151 *						0: LWE in EVENTS register
 152 *						1: LWE in EVENTS2 register
 153 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 154 *						HWML event mask
 155 *						0: HWE in EVENTS register
 156 *						1: HWE in EVENTS2 register
 157 *	30		---------		MUST BE 0
 158 *	31		CONT			1: Amount of samples to be
 159 *						transferred is unknown and
 160 *						script will keep on
 161 *						transferring samples as long as
 162 *						both events are detected and
 163 *						script must be manually stopped
 164 *						by the application
 165 *						0: The amount of samples to be
 166 *						transferred is equal to the
 167 *						count field of mode word
 168 */
 169#define SDMA_WATERMARK_LEVEL_LWML	0xFF
 170#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
 171#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
 172#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
 173#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
 174#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
 175#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
 176#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
 177#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
 178#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
 179
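The defines above encode that table bit for bit. A hedged sketch of composing a device-to-device watermark word for a source on SPBA with lower and higher watermarks of 8 and 16 samples (the values are made up):

	unsigned long wml = 0;

	wml |= 8 & SDMA_WATERMARK_LEVEL_LWML;		/* lower WML = 8 */
	wml |= (16 << 16) & SDMA_WATERMARK_LEVEL_HWML;	/* higher WML = 16 */
	wml |= SDMA_WATERMARK_LEVEL_SP;			/* source on SPBA */
	wml |= SDMA_WATERMARK_LEVEL_CONT;		/* run until stopped */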
 180/*
 181 * Mode/Count of data node descriptors - IPCv2
 182 */
 183struct sdma_mode_count {
 184	u32 count   : 16; /* size of the buffer pointed by this BD */
 185	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
 186	u32 command :  8; /* command mostly used for channel 0 */
 187};
 188
 189/*
 190 * Buffer descriptor
 191 */
 192struct sdma_buffer_descriptor {
 193	struct sdma_mode_count  mode;
 194	u32 buffer_addr;	/* address of the buffer described */
 195	u32 ext_buffer_addr;	/* extended buffer address */
 196} __attribute__ ((packed));
 197
 198/**
 199 * struct sdma_channel_control - Channel control Block
 200 *
 201 * @current_bd_ptr	current buffer descriptor processed
 202 * @base_bd_ptr		first element of buffer descriptor array
 203 * @unused		padding. The SDMA engine expects an array of 128 byte
 204 *			control blocks
 205 */
 206struct sdma_channel_control {
 207	u32 current_bd_ptr;
 208	u32 base_bd_ptr;
 209	u32 unused[2];
 210} __attribute__ ((packed));
 211
 212/**
 213 * struct sdma_state_registers - SDMA context for a channel
 214 *
 215 * @pc:		program counter
 216 * @t:		test bit: status of arithmetic & test instruction
 217 * @rpc:	return program counter
 218 * @sf:		source fault while loading data
 219 * @spc:	loop start program counter
 220 * @df:		destination fault while storing data
 221 * @epc:	loop end program counter
 222 * @lm:		loop mode
 223 */
 224struct sdma_state_registers {
 225	u32 pc     :14;
 226	u32 unused1: 1;
 227	u32 t      : 1;
 228	u32 rpc    :14;
 229	u32 unused0: 1;
 230	u32 sf     : 1;
 231	u32 spc    :14;
 232	u32 unused2: 1;
 233	u32 df     : 1;
 234	u32 epc    :14;
 235	u32 lm     : 2;
 236} __attribute__ ((packed));
 237
 238/**
 239 * struct sdma_context_data - sdma context specific to a channel
 240 *
 241 * @channel_state:	channel state bits
 242 * @gReg:		general registers
 243 * @mda:		burst dma destination address register
 244 * @msa:		burst dma source address register
 245 * @ms:			burst dma status register
 246 * @md:			burst dma data register
 247 * @pda:		peripheral dma destination address register
 248 * @psa:		peripheral dma source address register
 249 * @ps:			peripheral dma status register
 250 * @pd:			peripheral dma data register
 251 * @ca:			CRC polynomial register
 252 * @cs:			CRC accumulator register
 253 * @dda:		dedicated core destination address register
 254 * @dsa:		dedicated core source address register
 255 * @ds:			dedicated core status register
 256 * @dd:			dedicated core data register
 257 */
 258struct sdma_context_data {
 259	struct sdma_state_registers  channel_state;
 260	u32  gReg[8];
 261	u32  mda;
 262	u32  msa;
 263	u32  ms;
 264	u32  md;
 265	u32  pda;
 266	u32  psa;
 267	u32  ps;
 268	u32  pd;
 269	u32  ca;
 270	u32  cs;
 271	u32  dda;
 272	u32  dsa;
 273	u32  ds;
 274	u32  dd;
 275	u32  scratch0;
 276	u32  scratch1;
 277	u32  scratch2;
 278	u32  scratch3;
 279	u32  scratch4;
 280	u32  scratch5;
 281	u32  scratch6;
 282	u32  scratch7;
 283} __attribute__ ((packed));
 284
 285#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
 286
 287struct sdma_engine;
 288
 289/**
 290 * struct sdma_channel - housekeeping for a SDMA channel
 291 *
 292 * @sdma		pointer to the SDMA engine for this channel
 293 * @channel		the channel number, matches dmaengine chan_id + 1
 294 * @direction		transfer type. Needed for setting SDMA script
 295 * @peripheral_type	Peripheral type. Needed for setting SDMA script
 296 * @event_id0		aka dma request line
 297 * @event_id1		for channels that use 2 events
 298 * @word_size		peripheral access size
 299 * @buf_tail		ID of the buffer that was processed
 300 * @num_bd		max NUM_BD. number of descriptors currently being handled
 301 */
 302struct sdma_channel {
 303	struct sdma_engine		*sdma;
 304	unsigned int			channel;
 305	enum dma_transfer_direction		direction;
 306	enum sdma_peripheral_type	peripheral_type;
 307	unsigned int			event_id0;
 308	unsigned int			event_id1;
 309	enum dma_slave_buswidth		word_size;
 310	unsigned int			buf_tail;
 311	unsigned int			num_bd;
 312	unsigned int			period_len;
 313	struct sdma_buffer_descriptor	*bd;
 314	dma_addr_t			bd_phys;
 315	unsigned int			pc_from_device, pc_to_device;
 316	unsigned int			device_to_device;
 317	unsigned long			flags;
 318	dma_addr_t			per_address, per_address2;
 319	unsigned long			event_mask[2];
 320	unsigned long			watermark_level;
 321	u32				shp_addr, per_addr;
 322	struct dma_chan			chan;
 323	spinlock_t			lock;
 324	struct dma_async_tx_descriptor	desc;
 325	enum dma_status			status;
 326	unsigned int			chn_count;
 327	unsigned int			chn_real_count;
 328	struct tasklet_struct		tasklet;
 329	struct imx_dma_data		data;
 330};
 331
 332#define IMX_DMA_SG_LOOP		BIT(0)
 333
 334#define MAX_DMA_CHANNELS 32
 335#define MXC_SDMA_DEFAULT_PRIORITY 1
 336#define MXC_SDMA_MIN_PRIORITY 1
 337#define MXC_SDMA_MAX_PRIORITY 7
 338
 339#define SDMA_FIRMWARE_MAGIC 0x414d4453
 340
 341/**
 342 * struct sdma_firmware_header - Layout of the firmware image
 343 *
 344 * @magic		"SDMA"
 345 * @version_major	increased whenever layout of struct sdma_script_start_addrs
 346 *			changes.
 347 * @version_minor	firmware minor version (for binary compatible changes)
 348 * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
 349 * @num_script_addrs	Number of script addresses in this image
 350 * @ram_code_start	offset of SDMA ram image in this firmware image
 351 * @ram_code_size	size of SDMA ram image
 352 * @script_addrs	Stores the start address of the SDMA scripts
 353 *			(in SDMA memory space)
 354 */
 355struct sdma_firmware_header {
 356	u32	magic;
 357	u32	version_major;
 358	u32	version_minor;
 359	u32	script_addrs_start;
 360	u32	num_script_addrs;
 361	u32	ram_code_start;
 362	u32	ram_code_size;
 363};
 364
 365struct sdma_driver_data {
 366	int chnenbl0;
 367	int num_events;
 368	struct sdma_script_start_addrs	*script_addrs;
 369};
 370
 371struct sdma_engine {
 372	struct device			*dev;
 373	struct device_dma_parameters	dma_parms;
 374	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 375	struct sdma_channel_control	*channel_control;
 376	void __iomem			*regs;
 377	struct sdma_context_data	*context;
 378	dma_addr_t			context_phys;
 379	struct dma_device		dma_device;
 380	struct clk			*clk_ipg;
 381	struct clk			*clk_ahb;
 382	spinlock_t			channel_0_lock;
 383	u32				script_number;
 384	struct sdma_script_start_addrs	*script_addrs;
 385	const struct sdma_driver_data	*drvdata;
 386	u32				spba_start_addr;
 387	u32				spba_end_addr;
 388};
 389
 390static struct sdma_driver_data sdma_imx31 = {
 391	.chnenbl0 = SDMA_CHNENBL0_IMX31,
 392	.num_events = 32,
 393};
 394
 395static struct sdma_script_start_addrs sdma_script_imx25 = {
 396	.ap_2_ap_addr = 729,
 397	.uart_2_mcu_addr = 904,
 398	.per_2_app_addr = 1255,
 399	.mcu_2_app_addr = 834,
 400	.uartsh_2_mcu_addr = 1120,
 401	.per_2_shp_addr = 1329,
 402	.mcu_2_shp_addr = 1048,
 403	.ata_2_mcu_addr = 1560,
 404	.mcu_2_ata_addr = 1479,
 405	.app_2_per_addr = 1189,
 406	.app_2_mcu_addr = 770,
 407	.shp_2_per_addr = 1407,
 408	.shp_2_mcu_addr = 979,
 409};
 410
 411static struct sdma_driver_data sdma_imx25 = {
 412	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 413	.num_events = 48,
 414	.script_addrs = &sdma_script_imx25,
 415};
 416
 417static struct sdma_driver_data sdma_imx35 = {
 418	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 419	.num_events = 48,
 420};
 421
 422static struct sdma_script_start_addrs sdma_script_imx51 = {
 423	.ap_2_ap_addr = 642,
 424	.uart_2_mcu_addr = 817,
 425	.mcu_2_app_addr = 747,
 426	.mcu_2_shp_addr = 961,
 427	.ata_2_mcu_addr = 1473,
 428	.mcu_2_ata_addr = 1392,
 429	.app_2_per_addr = 1033,
 430	.app_2_mcu_addr = 683,
 431	.shp_2_per_addr = 1251,
 432	.shp_2_mcu_addr = 892,
 433};
 434
 435static struct sdma_driver_data sdma_imx51 = {
 436	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 437	.num_events = 48,
 438	.script_addrs = &sdma_script_imx51,
 439};
 440
 441static struct sdma_script_start_addrs sdma_script_imx53 = {
 442	.ap_2_ap_addr = 642,
 443	.app_2_mcu_addr = 683,
 444	.mcu_2_app_addr = 747,
 445	.uart_2_mcu_addr = 817,
 446	.shp_2_mcu_addr = 891,
 447	.mcu_2_shp_addr = 960,
 448	.uartsh_2_mcu_addr = 1032,
 449	.spdif_2_mcu_addr = 1100,
 450	.mcu_2_spdif_addr = 1134,
 451	.firi_2_mcu_addr = 1193,
 452	.mcu_2_firi_addr = 1290,
 453};
 454
 455static struct sdma_driver_data sdma_imx53 = {
 456	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 457	.num_events = 48,
 458	.script_addrs = &sdma_script_imx53,
 459};
 460
 461static struct sdma_script_start_addrs sdma_script_imx6q = {
 462	.ap_2_ap_addr = 642,
 463	.uart_2_mcu_addr = 817,
 464	.mcu_2_app_addr = 747,
 465	.per_2_per_addr = 6331,
 466	.uartsh_2_mcu_addr = 1032,
 467	.mcu_2_shp_addr = 960,
 468	.app_2_mcu_addr = 683,
 469	.shp_2_mcu_addr = 891,
 470	.spdif_2_mcu_addr = 1100,
 471	.mcu_2_spdif_addr = 1134,
 472};
 473
 474static struct sdma_driver_data sdma_imx6q = {
 475	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 476	.num_events = 48,
 477	.script_addrs = &sdma_script_imx6q,
 478};
 479
 480static const struct platform_device_id sdma_devtypes[] = {
 481	{
 482		.name = "imx25-sdma",
 483		.driver_data = (unsigned long)&sdma_imx25,
 484	}, {
 485		.name = "imx31-sdma",
 486		.driver_data = (unsigned long)&sdma_imx31,
 487	}, {
 488		.name = "imx35-sdma",
 489		.driver_data = (unsigned long)&sdma_imx35,
 490	}, {
 491		.name = "imx51-sdma",
 492		.driver_data = (unsigned long)&sdma_imx51,
 493	}, {
 494		.name = "imx53-sdma",
 495		.driver_data = (unsigned long)&sdma_imx53,
 496	}, {
 497		.name = "imx6q-sdma",
 498		.driver_data = (unsigned long)&sdma_imx6q,
 499	}, {
 500		/* sentinel */
 501	}
 502};
 503MODULE_DEVICE_TABLE(platform, sdma_devtypes);
 504
 505static const struct of_device_id sdma_dt_ids[] = {
 506	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
 507	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
 508	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
 509	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
 510	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 511	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 512	{ /* sentinel */ }
 513};
 514MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 515
 516#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
 517#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
  518#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq / core freq = 2 or 1 */
  519#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected */
 520
 521static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 522{
 523	u32 chnenbl0 = sdma->drvdata->chnenbl0;
 524	return chnenbl0 + event * 4;
 525}
 526
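/*
 * sdma_config_ownership - select which masters may trigger a channel
 *
 * Requesting an override clears the channel's bit in the respective
 * EVTOVR/HOSTOVR/DSPOVR register; a set bit masks that trigger source
 * out. Requesting all three overrides at once is rejected (refer to the
 * i.MX reference manual for the precise trigger semantics).
 */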
 527static int sdma_config_ownership(struct sdma_channel *sdmac,
 528		bool event_override, bool mcu_override, bool dsp_override)
 529{
 530	struct sdma_engine *sdma = sdmac->sdma;
 531	int channel = sdmac->channel;
 532	unsigned long evt, mcu, dsp;
 533
 534	if (event_override && mcu_override && dsp_override)
 535		return -EINVAL;
 536
 537	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
 538	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
 539	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
 540
 541	if (dsp_override)
 542		__clear_bit(channel, &dsp);
 543	else
 544		__set_bit(channel, &dsp);
 545
 546	if (event_override)
 547		__clear_bit(channel, &evt);
 548	else
 549		__set_bit(channel, &evt);
 550
 551	if (mcu_override)
 552		__clear_bit(channel, &mcu);
 553	else
 554		__set_bit(channel, &mcu);
 555
 556	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
 557	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
 558	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
 559
 560	return 0;
 561}
 562
 563static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 564{
 565	writel(BIT(channel), sdma->regs + SDMA_H_START);
 566}
 567
  568/*
  569 * sdma_run_channel0 - run channel 0 and busy-wait until it has finished
  570 */
 571static int sdma_run_channel0(struct sdma_engine *sdma)
 572{
 573	int ret;
 574	unsigned long timeout = 500;
 575
 576	sdma_enable_channel(sdma, 0);
 577
 578	while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
 579		if (timeout-- <= 0)
 580			break;
 581		udelay(1);
 582	}
 583
 584	if (ret) {
 585		/* Clear the interrupt status */
 586		writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
 587	} else {
 588		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 589	}
 590
  591	/* If CONFIG is still at its reset value, select dynamic context switching */
 592	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
 593		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
 594
 595	return ret ? 0 : -ETIMEDOUT;
 596}
 597
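/*
 * sdma_load_script - upload a script image into SDMA program memory
 *
 * The image is staged in a DMA-coherent bounce buffer and written with
 * the channel 0 command C0_SETPM to @address in SDMA program space.
 * Program memory is counted in 16-bit words, hence the size / 2 in the
 * buffer descriptor below.
 */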
 598static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 599		u32 address)
 600{
 601	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 602	void *buf_virt;
 603	dma_addr_t buf_phys;
 604	int ret;
 605	unsigned long flags;
 606
  607	buf_virt = dma_alloc_coherent(sdma->dev,
 608			size,
 609			&buf_phys, GFP_KERNEL);
 610	if (!buf_virt) {
 611		return -ENOMEM;
 612	}
 613
 614	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 615
 616	bd0->mode.command = C0_SETPM;
 617	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 618	bd0->mode.count = size / 2;
 619	bd0->buffer_addr = buf_phys;
 620	bd0->ext_buffer_addr = address;
 621
 622	memcpy(buf_virt, buf, size);
 623
 624	ret = sdma_run_channel0(sdma);
 625
 626	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 627
  628	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
 629
 630	return ret;
 631}
 632
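/*
 * Each DMA request event owns one channel-enable register (CHNENBLn);
 * bit i in that register routes the event to channel i. The two helpers
 * below set and clear this routing bit.
 */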
 633static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 634{
 635	struct sdma_engine *sdma = sdmac->sdma;
 636	int channel = sdmac->channel;
 637	unsigned long val;
 638	u32 chnenbl = chnenbl_ofs(sdma, event);
 639
 640	val = readl_relaxed(sdma->regs + chnenbl);
 641	__set_bit(channel, &val);
 642	writel_relaxed(val, sdma->regs + chnenbl);
 643}
 644
 645static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 646{
 647	struct sdma_engine *sdma = sdmac->sdma;
 648	int channel = sdmac->channel;
 649	u32 chnenbl = chnenbl_ofs(sdma, event);
 650	unsigned long val;
 651
 652	val = readl_relaxed(sdma->regs + chnenbl);
 653	__clear_bit(channel, &val);
 654	writel_relaxed(val, sdma->regs + chnenbl);
 655}
 656
 657static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
 658{
 659	if (sdmac->desc.callback)
 660		sdmac->desc.callback(sdmac->desc.callback_param);
 661}
 662
 663static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 664{
 665	struct sdma_buffer_descriptor *bd;
 666
  667	/*
  668	 * Loop mode: iterate over the completed descriptors and re-arm
  669	 * them; the callback itself is invoked from the channel tasklet.
  670	 */
 671	while (1) {
 672		bd = &sdmac->bd[sdmac->buf_tail];
 673
 674		if (bd->mode.status & BD_DONE)
 675			break;
 676
 677		if (bd->mode.status & BD_RROR)
 678			sdmac->status = DMA_ERROR;
 679
 680		bd->mode.status |= BD_DONE;
 681		sdmac->buf_tail++;
 682		sdmac->buf_tail %= sdmac->num_bd;
 683	}
 684}
 685
 686static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 687{
 688	struct sdma_buffer_descriptor *bd;
 689	int i, error = 0;
 690
 691	sdmac->chn_real_count = 0;
  692	/*
  693	 * Non-loop mode: iterate over all descriptors, collect
  694	 * errors and call the callback function.
  695	 */
 696	for (i = 0; i < sdmac->num_bd; i++) {
 697		bd = &sdmac->bd[i];
 698
  699		if (bd->mode.status & (BD_DONE | BD_RROR))
  700			error = -EIO;
  701		sdmac->chn_real_count += bd->mode.count;
 702	}
 703
 704	if (error)
 705		sdmac->status = DMA_ERROR;
 706	else
 707		sdmac->status = DMA_COMPLETE;
 708
 709	dma_cookie_complete(&sdmac->desc);
 710	if (sdmac->desc.callback)
 711		sdmac->desc.callback(sdmac->desc.callback_param);
 712}
 713
 714static void sdma_tasklet(unsigned long data)
 715{
 716	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 717
 718	if (sdmac->flags & IMX_DMA_SG_LOOP)
 719		sdma_handle_channel_loop(sdmac);
 720	else
 721		mxc_sdma_handle_channel_normal(sdmac);
 722}
 723
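/*
 * Interrupt handler: SDMA_H_INTR holds one pending bit per channel and is
 * acknowledged by writing the bits back. Channel 0 is polled separately
 * in sdma_run_channel0(), so it is masked here; every other pending
 * channel gets its tasklet scheduled, and cyclic channels additionally
 * re-arm their buffer descriptors while still in interrupt context.
 */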
 724static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 725{
 726	struct sdma_engine *sdma = dev_id;
 727	unsigned long stat;
 728
 729	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
 730	/* not interested in channel 0 interrupts */
 731	stat &= ~1;
 732	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 733
 734	while (stat) {
 735		int channel = fls(stat) - 1;
 736		struct sdma_channel *sdmac = &sdma->channel[channel];
 737
 738		if (sdmac->flags & IMX_DMA_SG_LOOP)
 739			sdma_update_channel_loop(sdmac);
 740
 741		tasklet_schedule(&sdmac->tasklet);
 742
 743		__clear_bit(channel, &stat);
 744	}
 745
 746	return IRQ_HANDLED;
 747}
 748
  749/*
  750 * Sets the PC of the SDMA script according to the peripheral type
  751 */
 752static void sdma_get_pc(struct sdma_channel *sdmac,
 753		enum sdma_peripheral_type peripheral_type)
 754{
 755	struct sdma_engine *sdma = sdmac->sdma;
 756	int per_2_emi = 0, emi_2_per = 0;
 757	/*
 758	 * These are needed once we start to support transfers between
 759	 * two peripherals or memory-to-memory transfers
 760	 */
 761	int per_2_per = 0, emi_2_emi = 0;
 762
 763	sdmac->pc_from_device = 0;
 764	sdmac->pc_to_device = 0;
 765	sdmac->device_to_device = 0;
 766
 767	switch (peripheral_type) {
 768	case IMX_DMATYPE_MEMORY:
 769		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 770		break;
 771	case IMX_DMATYPE_DSP:
 772		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
 773		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
 774		break;
 775	case IMX_DMATYPE_FIRI:
 776		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
 777		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
 778		break;
 779	case IMX_DMATYPE_UART:
 780		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
 781		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 782		break;
 783	case IMX_DMATYPE_UART_SP:
 784		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
 785		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 786		break;
 787	case IMX_DMATYPE_ATA:
 788		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
 789		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
 790		break;
 791	case IMX_DMATYPE_CSPI:
 792	case IMX_DMATYPE_EXT:
 793	case IMX_DMATYPE_SSI:
 794	case IMX_DMATYPE_SAI:
 795		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
 796		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 797		break;
 798	case IMX_DMATYPE_SSI_DUAL:
 799		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
 800		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
 801		break;
 802	case IMX_DMATYPE_SSI_SP:
 803	case IMX_DMATYPE_MMC:
 804	case IMX_DMATYPE_SDHC:
 805	case IMX_DMATYPE_CSPI_SP:
 806	case IMX_DMATYPE_ESAI:
 807	case IMX_DMATYPE_MSHC_SP:
 808		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
 809		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 810		break;
 811	case IMX_DMATYPE_ASRC:
 812		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
 813		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
 814		per_2_per = sdma->script_addrs->per_2_per_addr;
 815		break;
 816	case IMX_DMATYPE_ASRC_SP:
 817		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
 818		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 819		per_2_per = sdma->script_addrs->per_2_per_addr;
 820		break;
 821	case IMX_DMATYPE_MSHC:
 822		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
 823		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
 824		break;
 825	case IMX_DMATYPE_CCM:
 826		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
 827		break;
 828	case IMX_DMATYPE_SPDIF:
 829		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
 830		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
 831		break;
 832	case IMX_DMATYPE_IPU_MEMORY:
 833		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
 834		break;
 835	default:
 836		break;
 837	}
 838
 839	sdmac->pc_from_device = per_2_emi;
 840	sdmac->pc_to_device = emi_2_per;
 841	sdmac->device_to_device = per_2_per;
 842}
 843
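/*
 * sdma_load_context - write a channel's initial context to SDMA RAM
 *
 * The context (program counter plus general registers g0..g7 carrying the
 * event masks, peripheral addresses and watermark level) is staged in a
 * coherent buffer and stored with the channel 0 command C0_SETDM. As the
 * ext_buffer_addr computation below shows, contexts live in SDMA data
 * memory from word address 2048 onwards, one context-sized slot per
 * channel.
 */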
 844static int sdma_load_context(struct sdma_channel *sdmac)
 845{
 846	struct sdma_engine *sdma = sdmac->sdma;
 847	int channel = sdmac->channel;
 848	int load_address;
 849	struct sdma_context_data *context = sdma->context;
 850	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 851	int ret;
 852	unsigned long flags;
 853
 854	if (sdmac->direction == DMA_DEV_TO_MEM)
 855		load_address = sdmac->pc_from_device;
 856	else if (sdmac->direction == DMA_DEV_TO_DEV)
 857		load_address = sdmac->device_to_device;
 858	else
 859		load_address = sdmac->pc_to_device;
 860
 861	if (load_address < 0)
 862		return load_address;
 863
 864	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
 865	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
 866	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
 867	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
 868	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
 869	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 870
 871	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 872
 873	memset(context, 0, sizeof(*context));
 874	context->channel_state.pc = load_address;
 875
  876	/* Pass the event masks, peripheral base address and watermark
  877	 * level to the script via the channel context registers.
  878	 */
 879	context->gReg[0] = sdmac->event_mask[1];
 880	context->gReg[1] = sdmac->event_mask[0];
 881	context->gReg[2] = sdmac->per_addr;
 882	context->gReg[6] = sdmac->shp_addr;
 883	context->gReg[7] = sdmac->watermark_level;
 884
 885	bd0->mode.command = C0_SETDM;
 886	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 887	bd0->mode.count = sizeof(*context) / 4;
 888	bd0->buffer_addr = sdma->context_phys;
 889	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
 890	ret = sdma_run_channel0(sdma);
 891
 892	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 893
 894	return ret;
 895}
 896
 897static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 898{
 899	return container_of(chan, struct sdma_channel, chan);
 900}
 901
 902static int sdma_disable_channel(struct dma_chan *chan)
 903{
 904	struct sdma_channel *sdmac = to_sdma_chan(chan);
 905	struct sdma_engine *sdma = sdmac->sdma;
 906	int channel = sdmac->channel;
 907
 908	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 909	sdmac->status = DMA_ERROR;
 910
 911	return 0;
 912}
 913
 914static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 915{
 916	struct sdma_engine *sdma = sdmac->sdma;
 917
 918	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
 919	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
 920
 921	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
 922	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
 923
 924	if (sdmac->event_id0 > 31)
 925		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
 926
 927	if (sdmac->event_id1 > 31)
 928		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
 929
 930	/*
 931	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need
 932	 * swap LWML and HWML of INFO(A.3.2.5.1), also need swap
 933	 * r0(event_mask[1]) and r1(event_mask[0]).
 934	 */
 935	if (lwml > hwml) {
 936		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
 937						SDMA_WATERMARK_LEVEL_HWML);
 938		sdmac->watermark_level |= hwml;
 939		sdmac->watermark_level |= lwml << 16;
 940		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
 941	}
 942
 943	if (sdmac->per_address2 >= sdma->spba_start_addr &&
 944			sdmac->per_address2 <= sdma->spba_end_addr)
 945		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
 946
 947	if (sdmac->per_address >= sdma->spba_start_addr &&
 948			sdmac->per_address <= sdma->spba_end_addr)
 949		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
 950
 951	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
 952}
 953
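/*
 * sdma_config_channel - apply the current channel settings to hardware
 *
 * Stops the channel, re-enables its DMA request events, chooses ownership
 * and script PC for the peripheral type, fills in watermark level and
 * peripheral addresses, and finally reloads the channel context.
 */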
 954static int sdma_config_channel(struct dma_chan *chan)
 955{
 956	struct sdma_channel *sdmac = to_sdma_chan(chan);
 957	int ret;
 958
 959	sdma_disable_channel(chan);
 960
 961	sdmac->event_mask[0] = 0;
 962	sdmac->event_mask[1] = 0;
 963	sdmac->shp_addr = 0;
 964	sdmac->per_addr = 0;
 965
 966	if (sdmac->event_id0) {
 967		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
 968			return -EINVAL;
 969		sdma_event_enable(sdmac, sdmac->event_id0);
 970	}
 971
 972	if (sdmac->event_id1) {
 973		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
 974			return -EINVAL;
 975		sdma_event_enable(sdmac, sdmac->event_id1);
 976	}
 977
 978	switch (sdmac->peripheral_type) {
 979	case IMX_DMATYPE_DSP:
 980		sdma_config_ownership(sdmac, false, true, true);
 981		break;
 982	case IMX_DMATYPE_MEMORY:
 983		sdma_config_ownership(sdmac, false, true, false);
 984		break;
 985	default:
 986		sdma_config_ownership(sdmac, true, true, false);
 987		break;
 988	}
 989
 990	sdma_get_pc(sdmac, sdmac->peripheral_type);
 991
 992	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
 993			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
 994		/* Handle multiple event channels differently */
 995		if (sdmac->event_id1) {
 996			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
 997			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
 998				sdma_set_watermarklevel_for_p2p(sdmac);
 999		} else
1000			__set_bit(sdmac->event_id0, sdmac->event_mask);
1001
 1002		/* Watermark level was already set up in sdma_config() */
 1003
1004		/* Address */
1005		sdmac->shp_addr = sdmac->per_address;
1006		sdmac->per_addr = sdmac->per_address2;
1007	} else {
1008		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
1009	}
1010
1011	ret = sdma_load_context(sdmac);
1012
1013	return ret;
1014}
1015
1016static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1017		unsigned int priority)
1018{
1019	struct sdma_engine *sdma = sdmac->sdma;
1020	int channel = sdmac->channel;
1021
1022	if (priority < MXC_SDMA_MIN_PRIORITY
1023	    || priority > MXC_SDMA_MAX_PRIORITY) {
1024		return -EINVAL;
1025	}
1026
1027	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1028
1029	return 0;
1030}
1031
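/*
 * sdma_request_channel - allocate the per-channel buffer descriptors
 *
 * One page of coherent memory holds the channel's buffer descriptors.
 * Its physical address is published through the channel control block so
 * the engine can fetch descriptors, and a default priority is assigned.
 */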
1032static int sdma_request_channel(struct sdma_channel *sdmac)
1033{
1034	struct sdma_engine *sdma = sdmac->sdma;
1035	int channel = sdmac->channel;
1036	int ret = -EBUSY;
1037
 1038	sdmac->bd = dma_zalloc_coherent(sdma->dev, PAGE_SIZE, &sdmac->bd_phys,
 1039					GFP_KERNEL);
1040	if (!sdmac->bd) {
1041		ret = -ENOMEM;
1042		goto out;
1043	}
1044
1045	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
1046	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1047
1048	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
1049	return 0;
1050out:
1051
1052	return ret;
1053}
1054
1055static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
1056{
1057	unsigned long flags;
1058	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
1059	dma_cookie_t cookie;
1060
1061	spin_lock_irqsave(&sdmac->lock, flags);
1062
1063	cookie = dma_cookie_assign(tx);
1064
1065	spin_unlock_irqrestore(&sdmac->lock, flags);
1066
1067	return cookie;
1068}
1069
1070static int sdma_alloc_chan_resources(struct dma_chan *chan)
1071{
1072	struct sdma_channel *sdmac = to_sdma_chan(chan);
1073	struct imx_dma_data *data = chan->private;
1074	int prio, ret;
1075
1076	if (!data)
1077		return -EINVAL;
1078
1079	switch (data->priority) {
1080	case DMA_PRIO_HIGH:
1081		prio = 3;
1082		break;
1083	case DMA_PRIO_MEDIUM:
1084		prio = 2;
1085		break;
1086	case DMA_PRIO_LOW:
1087	default:
1088		prio = 1;
1089		break;
1090	}
1091
1092	sdmac->peripheral_type = data->peripheral_type;
1093	sdmac->event_id0 = data->dma_request;
1094	sdmac->event_id1 = data->dma_request2;
1095
1096	ret = clk_enable(sdmac->sdma->clk_ipg);
1097	if (ret)
1098		return ret;
1099	ret = clk_enable(sdmac->sdma->clk_ahb);
1100	if (ret)
1101		goto disable_clk_ipg;
1102
1103	ret = sdma_request_channel(sdmac);
1104	if (ret)
1105		goto disable_clk_ahb;
1106
1107	ret = sdma_set_channel_priority(sdmac, prio);
1108	if (ret)
1109		goto disable_clk_ahb;
1110
1111	dma_async_tx_descriptor_init(&sdmac->desc, chan);
1112	sdmac->desc.tx_submit = sdma_tx_submit;
1113	/* txd.flags will be overwritten in prep funcs */
1114	sdmac->desc.flags = DMA_CTRL_ACK;
1115
1116	return 0;
1117
1118disable_clk_ahb:
1119	clk_disable(sdmac->sdma->clk_ahb);
1120disable_clk_ipg:
1121	clk_disable(sdmac->sdma->clk_ipg);
1122	return ret;
1123}
1124
1125static void sdma_free_chan_resources(struct dma_chan *chan)
1126{
1127	struct sdma_channel *sdmac = to_sdma_chan(chan);
1128	struct sdma_engine *sdma = sdmac->sdma;
1129
1130	sdma_disable_channel(chan);
1131
1132	if (sdmac->event_id0)
1133		sdma_event_disable(sdmac, sdmac->event_id0);
1134	if (sdmac->event_id1)
1135		sdma_event_disable(sdmac, sdmac->event_id1);
1136
1137	sdmac->event_id0 = 0;
1138	sdmac->event_id1 = 0;
1139
1140	sdma_set_channel_priority(sdmac, 0);
1141
 1142	dma_free_coherent(sdma->dev, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
1143
1144	clk_disable(sdma->clk_ipg);
1145	clk_disable(sdma->clk_ahb);
1146}
1147
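/*
 * Scatter/gather preparation: one buffer descriptor is built per sg
 * entry, capped at NUM_BD descriptors and 0xffff bytes each, matching
 * the width of the descriptor count field. The last descriptor gets
 * BD_INTR and BD_LAST so completion raises an interrupt.
 */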
1148static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1149		struct dma_chan *chan, struct scatterlist *sgl,
1150		unsigned int sg_len, enum dma_transfer_direction direction,
1151		unsigned long flags, void *context)
1152{
1153	struct sdma_channel *sdmac = to_sdma_chan(chan);
1154	struct sdma_engine *sdma = sdmac->sdma;
1155	int ret, i, count;
1156	int channel = sdmac->channel;
1157	struct scatterlist *sg;
1158
1159	if (sdmac->status == DMA_IN_PROGRESS)
1160		return NULL;
1161	sdmac->status = DMA_IN_PROGRESS;
1162
1163	sdmac->flags = 0;
1164
1165	sdmac->buf_tail = 0;
1166
1167	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1168			sg_len, channel);
1169
1170	sdmac->direction = direction;
1171	ret = sdma_load_context(sdmac);
1172	if (ret)
1173		goto err_out;
1174
1175	if (sg_len > NUM_BD) {
1176		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1177				channel, sg_len, NUM_BD);
1178		ret = -EINVAL;
1179		goto err_out;
1180	}
1181
1182	sdmac->chn_count = 0;
1183	for_each_sg(sgl, sg, sg_len, i) {
1184		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1185		int param;
1186
1187		bd->buffer_addr = sg->dma_address;
1188
1189		count = sg_dma_len(sg);
1190
1191		if (count > 0xffff) {
1192			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1193					channel, count, 0xffff);
1194			ret = -EINVAL;
1195			goto err_out;
1196		}
1197
1198		bd->mode.count = count;
1199		sdmac->chn_count += count;
1200
1201		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
1202			ret =  -EINVAL;
1203			goto err_out;
1204		}
1205
1206		switch (sdmac->word_size) {
1207		case DMA_SLAVE_BUSWIDTH_4_BYTES:
1208			bd->mode.command = 0;
1209			if (count & 3 || sg->dma_address & 3)
 1210				goto err_out;
1211			break;
1212		case DMA_SLAVE_BUSWIDTH_2_BYTES:
1213			bd->mode.command = 2;
1214			if (count & 1 || sg->dma_address & 1)
 1215				goto err_out;
1216			break;
1217		case DMA_SLAVE_BUSWIDTH_1_BYTE:
1218			bd->mode.command = 1;
1219			break;
1220		default:
 1221			goto err_out;
1222		}
1223
1224		param = BD_DONE | BD_EXTD | BD_CONT;
1225
1226		if (i + 1 == sg_len) {
1227			param |= BD_INTR;
1228			param |= BD_LAST;
1229			param &= ~BD_CONT;
1230		}
1231
1232		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1233				i, count, (u64)sg->dma_address,
1234				param & BD_WRAP ? "wrap" : "",
1235				param & BD_INTR ? " intr" : "");
1236
1237		bd->mode.status = param;
1238	}
1239
1240	sdmac->num_bd = sg_len;
1241	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1242
1243	return &sdmac->desc;
1244err_out:
1245	sdmac->status = DMA_ERROR;
1246	return NULL;
1247}
1248
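/*
 * Cyclic (audio-style) preparation: the buffer is split into
 * buf_len / period_len descriptors, each raising an interrupt via
 * BD_INTR; the last one carries BD_WRAP so the engine loops back to the
 * first descriptor indefinitely until the channel is terminated.
 */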
1249static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1250		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1251		size_t period_len, enum dma_transfer_direction direction,
1252		unsigned long flags)
1253{
1254	struct sdma_channel *sdmac = to_sdma_chan(chan);
1255	struct sdma_engine *sdma = sdmac->sdma;
1256	int num_periods = buf_len / period_len;
1257	int channel = sdmac->channel;
1258	int ret, i = 0, buf = 0;
1259
1260	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1261
1262	if (sdmac->status == DMA_IN_PROGRESS)
1263		return NULL;
1264
1265	sdmac->status = DMA_IN_PROGRESS;
1266
1267	sdmac->buf_tail = 0;
1268	sdmac->period_len = period_len;
1269
1270	sdmac->flags |= IMX_DMA_SG_LOOP;
1271	sdmac->direction = direction;
1272	ret = sdma_load_context(sdmac);
1273	if (ret)
1274		goto err_out;
1275
1276	if (num_periods > NUM_BD) {
1277		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1278				channel, num_periods, NUM_BD);
1279		goto err_out;
1280	}
1281
1282	if (period_len > 0xffff) {
 1283		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
 1284				channel, period_len, 0xffff);
1285		goto err_out;
1286	}
1287
1288	while (buf < buf_len) {
1289		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1290		int param;
1291
1292		bd->buffer_addr = dma_addr;
1293
1294		bd->mode.count = period_len;
1295
1296		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1297			goto err_out;
1298		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1299			bd->mode.command = 0;
1300		else
1301			bd->mode.command = sdmac->word_size;
1302
1303		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1304		if (i + 1 == num_periods)
1305			param |= BD_WRAP;
1306
1307		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1308				i, period_len, (u64)dma_addr,
1309				param & BD_WRAP ? "wrap" : "",
1310				param & BD_INTR ? " intr" : "");
1311
1312		bd->mode.status = param;
1313
1314		dma_addr += period_len;
1315		buf += period_len;
1316
1317		i++;
1318	}
1319
1320	sdmac->num_bd = num_periods;
1321	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1322
1323	return &sdmac->desc;
1324err_out:
1325	sdmac->status = DMA_ERROR;
1326	return NULL;
1327}
1328
1329static int sdma_config(struct dma_chan *chan,
1330		       struct dma_slave_config *dmaengine_cfg)
1331{
1332	struct sdma_channel *sdmac = to_sdma_chan(chan);
1333
1334	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1335		sdmac->per_address = dmaengine_cfg->src_addr;
1336		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1337			dmaengine_cfg->src_addr_width;
1338		sdmac->word_size = dmaengine_cfg->src_addr_width;
1339	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
1340		sdmac->per_address2 = dmaengine_cfg->src_addr;
1341		sdmac->per_address = dmaengine_cfg->dst_addr;
1342		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1343			SDMA_WATERMARK_LEVEL_LWML;
1344		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1345			SDMA_WATERMARK_LEVEL_HWML;
1346		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1347	} else {
1348		sdmac->per_address = dmaengine_cfg->dst_addr;
1349		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1350			dmaengine_cfg->dst_addr_width;
1351		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1352	}
1353	sdmac->direction = dmaengine_cfg->direction;
1354	return sdma_config_channel(chan);
1355}
1356
1357static enum dma_status sdma_tx_status(struct dma_chan *chan,
1358				      dma_cookie_t cookie,
1359				      struct dma_tx_state *txstate)
1360{
1361	struct sdma_channel *sdmac = to_sdma_chan(chan);
1362	u32 residue;
1363
1364	if (sdmac->flags & IMX_DMA_SG_LOOP)
1365		residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
1366	else
1367		residue = sdmac->chn_count - sdmac->chn_real_count;
1368
1369	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1370			 residue);
1371
1372	return sdmac->status;
1373}
1374
1375static void sdma_issue_pending(struct dma_chan *chan)
1376{
1377	struct sdma_channel *sdmac = to_sdma_chan(chan);
1378	struct sdma_engine *sdma = sdmac->sdma;
1379
1380	if (sdmac->status == DMA_IN_PROGRESS)
1381		sdma_enable_channel(sdma, sdmac->channel);
1382}
1383
1384#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
1385#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
1386#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41
1387
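/*
 * sdma_add_scripts - merge a table of script start addresses
 *
 * Both tables are treated as flat arrays of s32. Only valid (positive)
 * entries of @addr overwrite the current set, so addresses from the ROM
 * defaults survive a partial firmware table.
 */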
1388static void sdma_add_scripts(struct sdma_engine *sdma,
1389		const struct sdma_script_start_addrs *addr)
1390{
 1391	const s32 *addr_arr = (const s32 *)addr;
 1392	s32 *saddr_arr = (s32 *)sdma->script_addrs;
1393	int i;
1394
1395	/* use the default firmware in ROM if missing external firmware */
1396	if (!sdma->script_number)
1397		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1398
1399	for (i = 0; i < sdma->script_number; i++)
1400		if (addr_arr[i] > 0)
1401			saddr_arr[i] = addr_arr[i];
1402}
1403
1404static void sdma_load_firmware(const struct firmware *fw, void *context)
1405{
1406	struct sdma_engine *sdma = context;
1407	const struct sdma_firmware_header *header;
1408	const struct sdma_script_start_addrs *addr;
1409	unsigned short *ram_code;
1410
1411	if (!fw) {
1412		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1413		/* In this case we just use the ROM firmware. */
1414		return;
1415	}
1416
1417	if (fw->size < sizeof(*header))
1418		goto err_firmware;
1419
1420	header = (struct sdma_firmware_header *)fw->data;
1421
1422	if (header->magic != SDMA_FIRMWARE_MAGIC)
1423		goto err_firmware;
1424	if (header->ram_code_start + header->ram_code_size > fw->size)
1425		goto err_firmware;
1426	switch (header->version_major) {
1427	case 1:
1428		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1429		break;
1430	case 2:
1431		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
1432		break;
1433	case 3:
1434		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
1435		break;
1436	default:
1437		dev_err(sdma->dev, "unknown firmware version\n");
1438		goto err_firmware;
1439	}
1440
1441	addr = (void *)header + header->script_addrs_start;
1442	ram_code = (void *)header + header->ram_code_start;
1443
1444	clk_enable(sdma->clk_ipg);
1445	clk_enable(sdma->clk_ahb);
1446	/* download the RAM image for SDMA */
1447	sdma_load_script(sdma, ram_code,
1448			header->ram_code_size,
1449			addr->ram_code_start_addr);
1450	clk_disable(sdma->clk_ipg);
1451	clk_disable(sdma->clk_ahb);
1452
1453	sdma_add_scripts(sdma, addr);
1454
1455	dev_info(sdma->dev, "loaded firmware %d.%d\n",
1456			header->version_major,
1457			header->version_minor);
1458
1459err_firmware:
1460	release_firmware(fw);
1461}
1462
1463#define EVENT_REMAP_CELLS 3
1464
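/*
 * sdma_event_remap - apply "fsl,sdma-event-remap" routing from the DT
 *
 * The property is a list of <reg shift val> triplets. Each triplet sets
 * or clears one bit in the GPR block referenced by the "gpr" phandle,
 * which is typically used to steer a shared DMA request line to this
 * SDMA instance.
 */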
1465static int sdma_event_remap(struct sdma_engine *sdma)
1466{
1467	struct device_node *np = sdma->dev->of_node;
1468	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1469	struct property *event_remap;
1470	struct regmap *gpr;
1471	char propname[] = "fsl,sdma-event-remap";
1472	u32 reg, val, shift, num_map, i;
1473	int ret = 0;
1474
 1475	if (!np || !gpr_np)
1476		goto out;
1477
1478	event_remap = of_find_property(np, propname, NULL);
1479	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
1480	if (!num_map) {
1481		dev_dbg(sdma->dev, "no event needs to be remapped\n");
1482		goto out;
1483	} else if (num_map % EVENT_REMAP_CELLS) {
 1484		dev_err(sdma->dev, "the length of %s must be a multiple of %d\n",
 1485				propname, EVENT_REMAP_CELLS);
1486		ret = -EINVAL;
1487		goto out;
1488	}
1489
1490	gpr = syscon_node_to_regmap(gpr_np);
1491	if (IS_ERR(gpr)) {
1492		dev_err(sdma->dev, "failed to get gpr regmap\n");
1493		ret = PTR_ERR(gpr);
1494		goto out;
1495	}
1496
1497	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
1498		ret = of_property_read_u32_index(np, propname, i, &reg);
1499		if (ret) {
1500			dev_err(sdma->dev, "failed to read property %s index %d\n",
1501					propname, i);
1502			goto out;
1503		}
1504
1505		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
1506		if (ret) {
1507			dev_err(sdma->dev, "failed to read property %s index %d\n",
1508					propname, i + 1);
1509			goto out;
1510		}
1511
1512		ret = of_property_read_u32_index(np, propname, i + 2, &val);
1513		if (ret) {
1514			dev_err(sdma->dev, "failed to read property %s index %d\n",
1515					propname, i + 2);
1516			goto out;
1517		}
1518
1519		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
1520	}
1521
1522out:
 1523	if (gpr_np)
1524		of_node_put(gpr_np);
1525
1526	return ret;
1527}
1528
1529static int sdma_get_firmware(struct sdma_engine *sdma,
1530		const char *fw_name)
1531{
1532	int ret;
1533
1534	ret = request_firmware_nowait(THIS_MODULE,
1535			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1536			GFP_KERNEL, sdma, sdma_load_firmware);
1537
1538	return ret;
1539}
1540
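/*
 * sdma_init - one-time controller setup
 *
 * Allocates the channel control blocks together with the shared context
 * buffer, masks every DMA request event, zeroes all channel priorities,
 * claims channel 0 for the host and finally points the controller at the
 * CCB array through SDMA_H_C0PTR.
 */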
1541static int sdma_init(struct sdma_engine *sdma)
1542{
1543	int i, ret;
1544	dma_addr_t ccb_phys;
1545
1546	ret = clk_enable(sdma->clk_ipg);
1547	if (ret)
1548		return ret;
1549	ret = clk_enable(sdma->clk_ahb);
1550	if (ret)
1551		goto disable_clk_ipg;
1552
1553	/* Be sure SDMA has not started yet */
1554	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1555
 1556	sdma->channel_control = dma_alloc_coherent(sdma->dev,
1557			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1558			sizeof(struct sdma_context_data),
1559			&ccb_phys, GFP_KERNEL);
1560
1561	if (!sdma->channel_control) {
1562		ret = -ENOMEM;
1563		goto err_dma_alloc;
1564	}
1565
1566	sdma->context = (void *)sdma->channel_control +
1567		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1568	sdma->context_phys = ccb_phys +
1569		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1570
1571	/* Zero-out the CCB structures array just allocated */
1572	memset(sdma->channel_control, 0,
1573			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1574
1575	/* disable all channels */
1576	for (i = 0; i < sdma->drvdata->num_events; i++)
1577		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1578
1579	/* All channels have priority 0 */
1580	for (i = 0; i < MAX_DMA_CHANNELS; i++)
1581		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1582
1583	ret = sdma_request_channel(&sdma->channel[0]);
1584	if (ret)
1585		goto err_dma_alloc;
1586
1587	sdma_config_ownership(&sdma->channel[0], false, true, false);
1588
1589	/* Set Command Channel (Channel Zero) */
1590	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1591
 1592	/* Program CONFIG with static context switching (CSM = 0) for now */
1593	/* FIXME: Check whether to set ACR bit depending on clock ratios */
1594	writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1595
1596	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1597
 1598	/* Give the command channel (channel 0) the highest priority */
1599	sdma_set_channel_priority(&sdma->channel[0], 7);
1600
1601	clk_disable(sdma->clk_ipg);
1602	clk_disable(sdma->clk_ahb);
1603
1604	return 0;
1605
1606err_dma_alloc:
1607	clk_disable(sdma->clk_ahb);
1608disable_clk_ipg:
1609	clk_disable(sdma->clk_ipg);
1610	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1611	return ret;
1612}
1613
1614static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1615{
1616	struct sdma_channel *sdmac = to_sdma_chan(chan);
1617	struct imx_dma_data *data = fn_param;
1618
1619	if (!imx_dma_is_general_purpose(chan))
1620		return false;
1621
1622	sdmac->data = *data;
1623	chan->private = &sdmac->data;
1624
1625	return true;
1626}
1627
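/*
 * sdma_xlate - translate a three-cell DT DMA specifier into a channel
 *
 * A sketch of a client node (the request id, peripheral type and
 * priority numbers below are made up for illustration):
 *
 *	dmas = <&sdma 25 4 0>;
 *	dma-names = "rx";
 */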
1628static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1629				   struct of_dma *ofdma)
1630{
1631	struct sdma_engine *sdma = ofdma->of_dma_data;
1632	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
1633	struct imx_dma_data data;
1634
1635	if (dma_spec->args_count != 3)
1636		return NULL;
1637
1638	data.dma_request = dma_spec->args[0];
1639	data.peripheral_type = dma_spec->args[1];
1640	data.priority = dma_spec->args[2];
 1641	/*
 1642	 * Initialize dma_request2 to zero; it is not encoded in the DT.
 1643	 * For P2P, dma_request2 is set up via dma_request_channel():
 1644	 * chan->private will point to the imx_dma_data, and in
 1645	 * device_alloc_chan_resources() imx_dma_data.dma_request2 will
 1646	 * be copied to sdmac->event_id1.
 1647	 */
1648	data.dma_request2 = 0;
1649
1650	return dma_request_channel(mask, sdma_filter_fn, &data);
1651}
1652
1653static int sdma_probe(struct platform_device *pdev)
1654{
1655	const struct of_device_id *of_id =
1656			of_match_device(sdma_dt_ids, &pdev->dev);
1657	struct device_node *np = pdev->dev.of_node;
1658	struct device_node *spba_bus;
1659	const char *fw_name;
1660	int ret;
1661	int irq;
1662	struct resource *iores;
1663	struct resource spba_res;
1664	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1665	int i;
1666	struct sdma_engine *sdma;
1667	s32 *saddr_arr;
1668	const struct sdma_driver_data *drvdata = NULL;
1669
1670	if (of_id)
1671		drvdata = of_id->data;
1672	else if (pdev->id_entry)
1673		drvdata = (void *)pdev->id_entry->driver_data;
1674
1675	if (!drvdata) {
1676		dev_err(&pdev->dev, "unable to find driver data\n");
1677		return -EINVAL;
1678	}
1679
1680	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1681	if (ret)
1682		return ret;
1683
1684	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
1685	if (!sdma)
1686		return -ENOMEM;
1687
1688	spin_lock_init(&sdma->channel_0_lock);
1689
1690	sdma->dev = &pdev->dev;
1691	sdma->drvdata = drvdata;
1692
1693	irq = platform_get_irq(pdev, 0);
1694	if (irq < 0)
1695		return irq;
1696
1697	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1698	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
1699	if (IS_ERR(sdma->regs))
1700		return PTR_ERR(sdma->regs);
1701
1702	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1703	if (IS_ERR(sdma->clk_ipg))
1704		return PTR_ERR(sdma->clk_ipg);
1705
1706	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1707	if (IS_ERR(sdma->clk_ahb))
1708		return PTR_ERR(sdma->clk_ahb);
1709
1710	clk_prepare(sdma->clk_ipg);
1711	clk_prepare(sdma->clk_ahb);
1712
1713	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
1714			       sdma);
1715	if (ret)
1716		return ret;
1717
1718	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1719	if (!sdma->script_addrs)
1720		return -ENOMEM;
1721
1722	/* initially no scripts available */
1723	saddr_arr = (s32 *)sdma->script_addrs;
1724	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1725		saddr_arr[i] = -EINVAL;
1726
1727	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1728	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1729
1730	INIT_LIST_HEAD(&sdma->dma_device.channels);
1731	/* Initialize channel parameters */
1732	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1733		struct sdma_channel *sdmac = &sdma->channel[i];
1734
1735		sdmac->sdma = sdma;
1736		spin_lock_init(&sdmac->lock);
1737
1738		sdmac->chan.device = &sdma->dma_device;
1739		dma_cookie_init(&sdmac->chan);
1740		sdmac->channel = i;
1741
1742		tasklet_init(&sdmac->tasklet, sdma_tasklet,
1743			     (unsigned long) sdmac);
1744		/*
1745		 * Add the channel to the DMAC list. Do not add channel 0 though
1746		 * because we need it internally in the SDMA driver. This also means
1747		 * that channel 0 in dmaengine counting matches sdma channel 1.
1748		 */
1749		if (i)
1750			list_add_tail(&sdmac->chan.device_node,
1751					&sdma->dma_device.channels);
1752	}
1753
1754	ret = sdma_init(sdma);
1755	if (ret)
1756		goto err_init;
1757
1758	ret = sdma_event_remap(sdma);
1759	if (ret)
1760		goto err_init;
1761
1762	if (sdma->drvdata->script_addrs)
1763		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
1764	if (pdata && pdata->script_addrs)
1765		sdma_add_scripts(sdma, pdata->script_addrs);
1766
1767	if (pdata) {
1768		ret = sdma_get_firmware(sdma, pdata->fw_name);
1769		if (ret)
1770			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
1771	} else {
 1772		/*
 1773		 * Because the device tree does not encode ROM script
 1774		 * addresses, the RAM script in the firmware is mandatory
 1775		 * for device tree probe; otherwise it fails.
 1776		 */
1777		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
1778					      &fw_name);
1779		if (ret)
1780			dev_warn(&pdev->dev, "failed to get firmware name\n");
1781		else {
1782			ret = sdma_get_firmware(sdma, fw_name);
1783			if (ret)
1784				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
1785		}
1786	}
1787
1788	sdma->dma_device.dev = &pdev->dev;
1789
1790	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
1791	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
1792	sdma->dma_device.device_tx_status = sdma_tx_status;
1793	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1794	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1795	sdma->dma_device.device_config = sdma_config;
1796	sdma->dma_device.device_terminate_all = sdma_disable_channel;
1797	sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1798	sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1799	sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1800	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1801	sdma->dma_device.device_issue_pending = sdma_issue_pending;
1802	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1803	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1804
1805	platform_set_drvdata(pdev, sdma);
1806
1807	ret = dma_async_device_register(&sdma->dma_device);
1808	if (ret) {
1809		dev_err(&pdev->dev, "unable to register\n");
1810		goto err_init;
1811	}
1812
1813	if (np) {
1814		ret = of_dma_controller_register(np, sdma_xlate, sdma);
1815		if (ret) {
1816			dev_err(&pdev->dev, "failed to register controller\n");
1817			goto err_register;
1818		}
1819
1820		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
1821		ret = of_address_to_resource(spba_bus, 0, &spba_res);
1822		if (!ret) {
1823			sdma->spba_start_addr = spba_res.start;
1824			sdma->spba_end_addr = spba_res.end;
1825		}
1826		of_node_put(spba_bus);
1827	}
1828
1829	return 0;
1830
1831err_register:
1832	dma_async_device_unregister(&sdma->dma_device);
1833err_init:
1834	kfree(sdma->script_addrs);
1835	return ret;
1836}
1837
1838static int sdma_remove(struct platform_device *pdev)
1839{
1840	struct sdma_engine *sdma = platform_get_drvdata(pdev);
1841	int i;
1842
1843	dma_async_device_unregister(&sdma->dma_device);
1844	kfree(sdma->script_addrs);
1845	/* Kill the tasklet */
1846	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1847		struct sdma_channel *sdmac = &sdma->channel[i];
1848
1849		tasklet_kill(&sdmac->tasklet);
1850	}
1851
1852	platform_set_drvdata(pdev, NULL);
1853	return 0;
1854}
1855
1856static struct platform_driver sdma_driver = {
1857	.driver		= {
1858		.name	= "imx-sdma",
1859		.of_match_table = sdma_dt_ids,
1860	},
1861	.id_table	= sdma_devtypes,
1862	.remove		= sdma_remove,
1863	.probe		= sdma_probe,
1864};
1865
1866module_platform_driver(sdma_driver);
1867
1868MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1869MODULE_DESCRIPTION("i.MX SDMA driver");
1870MODULE_LICENSE("GPL");