v3.1
   1/*
   2 * drivers/dma/imx-sdma.c
   3 *
   4 * This file contains a driver for the Freescale Smart DMA engine
   5 *
   6 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
   7 *
   8 * Based on code from Freescale:
   9 *
  10 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
  11 *
  12 * The code contained herein is licensed under the GNU General Public
  13 * License. You may obtain a copy of the GNU General Public License
  14 * Version 2 or later at the following locations:
  15 *
  16 * http://www.opensource.org/licenses/gpl-license.html
  17 * http://www.gnu.org/copyleft/gpl.html
  18 */
  19
  20#include <linux/init.h>
  21#include <linux/types.h>
  22#include <linux/mm.h>
  23#include <linux/interrupt.h>
  24#include <linux/clk.h>
  25#include <linux/wait.h>
  26#include <linux/sched.h>
  27#include <linux/semaphore.h>
  28#include <linux/spinlock.h>
  29#include <linux/device.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/firmware.h>
  32#include <linux/slab.h>
  33#include <linux/platform_device.h>
  34#include <linux/dmaengine.h>
  35#include <linux/of.h>
  36#include <linux/of_device.h>
  37
  38#include <asm/irq.h>
  39#include <mach/sdma.h>
  40#include <mach/dma.h>
  41#include <mach/hardware.h>
  42
  43/* SDMA registers */
  44#define SDMA_H_C0PTR		0x000
  45#define SDMA_H_INTR		0x004
  46#define SDMA_H_STATSTOP		0x008
  47#define SDMA_H_START		0x00c
  48#define SDMA_H_EVTOVR		0x010
  49#define SDMA_H_DSPOVR		0x014
  50#define SDMA_H_HOSTOVR		0x018
  51#define SDMA_H_EVTPEND		0x01c
  52#define SDMA_H_DSPENBL		0x020
  53#define SDMA_H_RESET		0x024
  54#define SDMA_H_EVTERR		0x028
  55#define SDMA_H_INTRMSK		0x02c
  56#define SDMA_H_PSW		0x030
  57#define SDMA_H_EVTERRDBG	0x034
  58#define SDMA_H_CONFIG		0x038
  59#define SDMA_ONCE_ENB		0x040
  60#define SDMA_ONCE_DATA		0x044
  61#define SDMA_ONCE_INSTR		0x048
  62#define SDMA_ONCE_STAT		0x04c
  63#define SDMA_ONCE_CMD		0x050
  64#define SDMA_EVT_MIRROR		0x054
  65#define SDMA_ILLINSTADDR	0x058
  66#define SDMA_CHN0ADDR		0x05c
  67#define SDMA_ONCE_RTB		0x060
  68#define SDMA_XTRIG_CONF1	0x070
  69#define SDMA_XTRIG_CONF2	0x074
  70#define SDMA_CHNENBL0_IMX35	0x200
  71#define SDMA_CHNENBL0_IMX31	0x080
  72#define SDMA_CHNPRI_0		0x100
  73
  74/*
  75 * Buffer descriptor status values.
  76 */
  77#define BD_DONE  0x01
  78#define BD_WRAP  0x02
  79#define BD_CONT  0x04
  80#define BD_INTR  0x08
  81#define BD_RROR  0x10
  82#define BD_LAST  0x20
  83#define BD_EXTD  0x80
  84
  85/*
  86 * Data Node descriptor status values.
  87 */
  88#define DND_END_OF_FRAME  0x80
  89#define DND_END_OF_XFER   0x40
  90#define DND_DONE          0x20
  91#define DND_UNUSED        0x01
  92
  93/*
  94 * IPCV2 descriptor status values.
  95 */
  96#define BD_IPCV2_END_OF_FRAME  0x40
  97
  98#define IPCV2_MAX_NODES        50
  99/*
 100 * Error bit set in the CCB status field by the SDMA,
 101 * in setbd routine, in case of a transfer error
 102 */
 103#define DATA_ERROR  0x10000000
 104
 105/*
 106 * Buffer descriptor commands.
 107 */
 108#define C0_ADDR             0x01
 109#define C0_LOAD             0x02
 110#define C0_DUMP             0x03
 111#define C0_SETCTX           0x07
 112#define C0_GETCTX           0x03
 113#define C0_SETDM            0x01
 114#define C0_SETPM            0x04
 115#define C0_GETDM            0x02
 116#define C0_GETPM            0x08
 117/*
 118 * Change endianness indicator in the BD command field
 119 */
 120#define CHANGE_ENDIANNESS   0x80
 121
 122/*
 123 * Mode/Count of data node descriptors - IPCv2
 124 */
 125struct sdma_mode_count {
 126	u32 count   : 16; /* size of the buffer pointed by this BD */
 127	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
  128	u32 command :  8; /* command mostly used for channel 0 */
 129};
 130
 131/*
 132 * Buffer descriptor
 133 */
 134struct sdma_buffer_descriptor {
 135	struct sdma_mode_count  mode;
 136	u32 buffer_addr;	/* address of the buffer described */
 137	u32 ext_buffer_addr;	/* extended buffer address */
 138} __attribute__ ((packed));
 139
 140/**
 141 * struct sdma_channel_control - Channel control Block
 142 *
 143 * @current_bd_ptr	current buffer descriptor processed
 144 * @base_bd_ptr		first element of buffer descriptor array
 145 * @unused		padding. The SDMA engine expects an array of 128 byte
 146 *			control blocks
 147 */
 148struct sdma_channel_control {
 149	u32 current_bd_ptr;
 150	u32 base_bd_ptr;
 151	u32 unused[2];
 152} __attribute__ ((packed));
 153
 154/**
 155 * struct sdma_state_registers - SDMA context for a channel
 156 *
 157 * @pc:		program counter
 158 * @t:		test bit: status of arithmetic & test instruction
 159 * @rpc:	return program counter
 160 * @sf:		source fault while loading data
 161 * @spc:	loop start program counter
 162 * @df:		destination fault while storing data
 163 * @epc:	loop end program counter
 164 * @lm:		loop mode
 165 */
 166struct sdma_state_registers {
 167	u32 pc     :14;
 168	u32 unused1: 1;
 169	u32 t      : 1;
 170	u32 rpc    :14;
 171	u32 unused0: 1;
 172	u32 sf     : 1;
 173	u32 spc    :14;
 174	u32 unused2: 1;
 175	u32 df     : 1;
 176	u32 epc    :14;
 177	u32 lm     : 2;
 178} __attribute__ ((packed));
 179
 180/**
 181 * struct sdma_context_data - sdma context specific to a channel
 182 *
 183 * @channel_state:	channel state bits
 184 * @gReg:		general registers
 185 * @mda:		burst dma destination address register
 186 * @msa:		burst dma source address register
 187 * @ms:			burst dma status register
 188 * @md:			burst dma data register
 189 * @pda:		peripheral dma destination address register
 190 * @psa:		peripheral dma source address register
 191 * @ps:			peripheral dma status register
 192 * @pd:			peripheral dma data register
 193 * @ca:			CRC polynomial register
 194 * @cs:			CRC accumulator register
 195 * @dda:		dedicated core destination address register
 196 * @dsa:		dedicated core source address register
 197 * @ds:			dedicated core status register
 198 * @dd:			dedicated core data register
 199 */
 200struct sdma_context_data {
 201	struct sdma_state_registers  channel_state;
 202	u32  gReg[8];
 203	u32  mda;
 204	u32  msa;
 205	u32  ms;
 206	u32  md;
 207	u32  pda;
 208	u32  psa;
 209	u32  ps;
 210	u32  pd;
 211	u32  ca;
 212	u32  cs;
 213	u32  dda;
 214	u32  dsa;
 215	u32  ds;
 216	u32  dd;
 217	u32  scratch0;
 218	u32  scratch1;
 219	u32  scratch2;
 220	u32  scratch3;
 221	u32  scratch4;
 222	u32  scratch5;
 223	u32  scratch6;
 224	u32  scratch7;
 225} __attribute__ ((packed));
 226
 227#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
 228
 229struct sdma_engine;
 230
 231/**
 232 * struct sdma_channel - housekeeping for a SDMA channel
 233 *
 234 * @sdma		pointer to the SDMA engine for this channel
 235 * @channel		the channel number, matches dmaengine chan_id + 1
 236 * @direction		transfer type. Needed for setting SDMA script
 237 * @peripheral_type	Peripheral type. Needed for setting SDMA script
 238 * @event_id0		aka dma request line
 239 * @event_id1		for channels that use 2 events
 240 * @word_size		peripheral access size
 241 * @buf_tail		ID of the buffer that was processed
 242 * @done		channel completion
 243 * @num_bd		max NUM_BD. number of descriptors currently handling
 244 */
 245struct sdma_channel {
 246	struct sdma_engine		*sdma;
 247	unsigned int			channel;
 248	enum dma_data_direction		direction;
 249	enum sdma_peripheral_type	peripheral_type;
 250	unsigned int			event_id0;
 251	unsigned int			event_id1;
 252	enum dma_slave_buswidth		word_size;
 253	unsigned int			buf_tail;
 254	struct completion		done;
 255	unsigned int			num_bd;
 256	struct sdma_buffer_descriptor	*bd;
 257	dma_addr_t			bd_phys;
 258	unsigned int			pc_from_device, pc_to_device;
 259	unsigned long			flags;
 260	dma_addr_t			per_address;
 261	u32				event_mask0, event_mask1;
 262	u32				watermark_level;
 263	u32				shp_addr, per_addr;
 264	struct dma_chan			chan;
 265	spinlock_t			lock;
 266	struct dma_async_tx_descriptor	desc;
 267	dma_cookie_t			last_completed;
 268	enum dma_status			status;
 269};
 270
 271#define IMX_DMA_SG_LOOP		(1 << 0)
 272
 273#define MAX_DMA_CHANNELS 32
 274#define MXC_SDMA_DEFAULT_PRIORITY 1
 275#define MXC_SDMA_MIN_PRIORITY 1
 276#define MXC_SDMA_MAX_PRIORITY 7
 277
 278#define SDMA_FIRMWARE_MAGIC 0x414d4453
 279
 280/**
 281 * struct sdma_firmware_header - Layout of the firmware image
 282 *
 283 * @magic		"SDMA"
 284 * @version_major	increased whenever layout of struct sdma_script_start_addrs
 285 *			changes.
 286 * @version_minor	firmware minor version (for binary compatible changes)
 287 * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
 288 * @num_script_addrs	Number of script addresses in this image
 289 * @ram_code_start	offset of SDMA ram image in this firmware image
 290 * @ram_code_size	size of SDMA ram image
 291 * @script_addrs	Stores the start address of the SDMA scripts
 292 *			(in SDMA memory space)
 293 */
 294struct sdma_firmware_header {
 295	u32	magic;
 296	u32	version_major;
 297	u32	version_minor;
 298	u32	script_addrs_start;
 299	u32	num_script_addrs;
 300	u32	ram_code_start;
 301	u32	ram_code_size;
 302};
 303
 304enum sdma_devtype {
 305	IMX31_SDMA,	/* runs on i.mx31 */
 306	IMX35_SDMA,	/* runs on i.mx35 and later */
 307};
 308
 309struct sdma_engine {
 310	struct device			*dev;
 311	struct device_dma_parameters	dma_parms;
 312	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 313	struct sdma_channel_control	*channel_control;
 314	void __iomem			*regs;
 315	enum sdma_devtype		devtype;
 316	unsigned int			num_events;
 317	struct sdma_context_data	*context;
 318	dma_addr_t			context_phys;
 319	struct dma_device		dma_device;
 320	struct clk			*clk;
 321	struct sdma_script_start_addrs	*script_addrs;
 322};
 323
 324static struct platform_device_id sdma_devtypes[] = {
 325	{
 326		.name = "imx31-sdma",
 327		.driver_data = IMX31_SDMA,
 328	}, {
 329		.name = "imx35-sdma",
 330		.driver_data = IMX35_SDMA,
 331	}, {
 332		/* sentinel */
 333	}
 334};
 335MODULE_DEVICE_TABLE(platform, sdma_devtypes);
 336
 337static const struct of_device_id sdma_dt_ids[] = {
 338	{ .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
 339	{ .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
 340	{ /* sentinel */ }
 341};
 342MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 343
 344#define SDMA_H_CONFIG_DSPDMA	(1 << 12) /* indicates if the DSPDMA is used */
 345#define SDMA_H_CONFIG_RTD_PINS	(1 << 11) /* indicates if Real-Time Debug pins are enabled */
 346#define SDMA_H_CONFIG_ACR	(1 << 4)  /* indicates if AHB freq /core freq = 2 or 1 */
 347#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected*/
 348
 349static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 350{
 351	u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
 352						      SDMA_CHNENBL0_IMX35);
 353	return chnenbl0 + event * 4;
 354}
 355
 356static int sdma_config_ownership(struct sdma_channel *sdmac,
 357		bool event_override, bool mcu_override, bool dsp_override)
 358{
 359	struct sdma_engine *sdma = sdmac->sdma;
 360	int channel = sdmac->channel;
 361	u32 evt, mcu, dsp;
 362
 363	if (event_override && mcu_override && dsp_override)
 364		return -EINVAL;
 365
 366	evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
 367	mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
 368	dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
 369
 370	if (dsp_override)
 371		dsp &= ~(1 << channel);
 372	else
 373		dsp |= (1 << channel);
 374
 375	if (event_override)
 376		evt &= ~(1 << channel);
 377	else
 378		evt |= (1 << channel);
 379
 380	if (mcu_override)
 381		mcu &= ~(1 << channel);
 382	else
 383		mcu |= (1 << channel);
 384
 385	__raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
 386	__raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
 387	__raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
 388
 389	return 0;
 390}
 391
 392/*
 393 * sdma_run_channel - run a channel and wait till it's done
 394 */
 395static int sdma_run_channel(struct sdma_channel *sdmac)
 396{
 397	struct sdma_engine *sdma = sdmac->sdma;
 398	int channel = sdmac->channel;
 399	int ret;
 400
 401	init_completion(&sdmac->done);
 402
 403	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 404
 405	ret = wait_for_completion_timeout(&sdmac->done, HZ);
 406
 407	return ret ? 0 : -ETIMEDOUT;
 408}
 409
 410static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 411		u32 address)
 412{
 413	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 414	void *buf_virt;
 415	dma_addr_t buf_phys;
 416	int ret;
 417
 418	buf_virt = dma_alloc_coherent(NULL,
 419			size,
 420			&buf_phys, GFP_KERNEL);
 421	if (!buf_virt)
 422		return -ENOMEM;
 423
 424	bd0->mode.command = C0_SETPM;
 425	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 426	bd0->mode.count = size / 2;
 427	bd0->buffer_addr = buf_phys;
 428	bd0->ext_buffer_addr = address;
 429
 430	memcpy(buf_virt, buf, size);
 431
 432	ret = sdma_run_channel(&sdma->channel[0]);
 433
 434	dma_free_coherent(NULL, size, buf_virt, buf_phys);
 435
 436	return ret;
 437}
 438
 439static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 440{
 441	struct sdma_engine *sdma = sdmac->sdma;
 442	int channel = sdmac->channel;
 443	u32 val;
 444	u32 chnenbl = chnenbl_ofs(sdma, event);
 445
 446	val = __raw_readl(sdma->regs + chnenbl);
 447	val |= (1 << channel);
 448	__raw_writel(val, sdma->regs + chnenbl);
 449}
 450
 451static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 452{
 453	struct sdma_engine *sdma = sdmac->sdma;
 454	int channel = sdmac->channel;
 455	u32 chnenbl = chnenbl_ofs(sdma, event);
 456	u32 val;
 457
 458	val = __raw_readl(sdma->regs + chnenbl);
 459	val &= ~(1 << channel);
 460	__raw_writel(val, sdma->regs + chnenbl);
 461}
 462
 463static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
 464{
 465	struct sdma_buffer_descriptor *bd;
 466
 467	/*
 468	 * loop mode. Iterate over descriptors, re-setup them and
 469	 * call callback function.
 470	 */
 471	while (1) {
 472		bd = &sdmac->bd[sdmac->buf_tail];
 473
 474		if (bd->mode.status & BD_DONE)
 475			break;
 476
 477		if (bd->mode.status & BD_RROR)
 478			sdmac->status = DMA_ERROR;
 479		else
 480			sdmac->status = DMA_IN_PROGRESS;
 481
 482		bd->mode.status |= BD_DONE;
 483		sdmac->buf_tail++;
 484		sdmac->buf_tail %= sdmac->num_bd;
 485
 486		if (sdmac->desc.callback)
 487			sdmac->desc.callback(sdmac->desc.callback_param);
 488	}
 489}
 490
 491static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 492{
 493	struct sdma_buffer_descriptor *bd;
 494	int i, error = 0;
 495
 496	/*
 497	 * non loop mode. Iterate over all descriptors, collect
 498	 * errors and call callback function
 499	 */
 500	for (i = 0; i < sdmac->num_bd; i++) {
 501		bd = &sdmac->bd[i];
 502
 503		 if (bd->mode.status & (BD_DONE | BD_RROR))
 504			error = -EIO;
 505	}
 506
 507	if (error)
 508		sdmac->status = DMA_ERROR;
 509	else
 510		sdmac->status = DMA_SUCCESS;
 511
 512	if (sdmac->desc.callback)
 513		sdmac->desc.callback(sdmac->desc.callback_param);
 514	sdmac->last_completed = sdmac->desc.cookie;
 515}
 516
 517static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
 518{
 519	complete(&sdmac->done);
 520
 521	/* not interested in channel 0 interrupts */
 522	if (sdmac->channel == 0)
 523		return;
 524
 525	if (sdmac->flags & IMX_DMA_SG_LOOP)
 526		sdma_handle_channel_loop(sdmac);
 527	else
 528		mxc_sdma_handle_channel_normal(sdmac);
 529}
 530
 531static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 532{
 533	struct sdma_engine *sdma = dev_id;
 534	u32 stat;
 535
 536	stat = __raw_readl(sdma->regs + SDMA_H_INTR);
 537	__raw_writel(stat, sdma->regs + SDMA_H_INTR);
 538
 539	while (stat) {
 540		int channel = fls(stat) - 1;
 541		struct sdma_channel *sdmac = &sdma->channel[channel];
 542
 543		mxc_sdma_handle_channel(sdmac);
 544
 545		stat &= ~(1 << channel);
 546	}
 547
 548	return IRQ_HANDLED;
 549}
 550
 551/*
 552 * sets the pc of SDMA script according to the peripheral type
 553 */
 554static void sdma_get_pc(struct sdma_channel *sdmac,
 555		enum sdma_peripheral_type peripheral_type)
 556{
 557	struct sdma_engine *sdma = sdmac->sdma;
 558	int per_2_emi = 0, emi_2_per = 0;
 559	/*
 560	 * These are needed once we start to support transfers between
 561	 * two peripherals or memory-to-memory transfers
 562	 */
 563	int per_2_per = 0, emi_2_emi = 0;
 564
 565	sdmac->pc_from_device = 0;
 566	sdmac->pc_to_device = 0;
 567
 568	switch (peripheral_type) {
 569	case IMX_DMATYPE_MEMORY:
 570		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 571		break;
 572	case IMX_DMATYPE_DSP:
 573		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
 574		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
 575		break;
 576	case IMX_DMATYPE_FIRI:
 577		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
 578		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
 579		break;
 580	case IMX_DMATYPE_UART:
 581		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
 582		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 583		break;
 584	case IMX_DMATYPE_UART_SP:
 585		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
 586		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 587		break;
 588	case IMX_DMATYPE_ATA:
 589		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
 590		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
 591		break;
 592	case IMX_DMATYPE_CSPI:
 593	case IMX_DMATYPE_EXT:
 594	case IMX_DMATYPE_SSI:
 595		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
 596		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 597		break;
 598	case IMX_DMATYPE_SSI_SP:
 599	case IMX_DMATYPE_MMC:
 600	case IMX_DMATYPE_SDHC:
 601	case IMX_DMATYPE_CSPI_SP:
 602	case IMX_DMATYPE_ESAI:
 603	case IMX_DMATYPE_MSHC_SP:
 604		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
 605		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 606		break;
 607	case IMX_DMATYPE_ASRC:
 608		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
 609		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
 610		per_2_per = sdma->script_addrs->per_2_per_addr;
 611		break;
 612	case IMX_DMATYPE_MSHC:
 613		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
 614		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
 615		break;
 616	case IMX_DMATYPE_CCM:
 617		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
 618		break;
 619	case IMX_DMATYPE_SPDIF:
 620		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
 621		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
 622		break;
 623	case IMX_DMATYPE_IPU_MEMORY:
 624		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
 625		break;
 626	default:
 627		break;
 628	}
 629
 630	sdmac->pc_from_device = per_2_emi;
 631	sdmac->pc_to_device = emi_2_per;
 632}
 633
 634static int sdma_load_context(struct sdma_channel *sdmac)
 635{
 636	struct sdma_engine *sdma = sdmac->sdma;
 637	int channel = sdmac->channel;
 638	int load_address;
 639	struct sdma_context_data *context = sdma->context;
 640	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 641	int ret;
 642
 643	if (sdmac->direction == DMA_FROM_DEVICE) {
 644		load_address = sdmac->pc_from_device;
 645	} else {
 646		load_address = sdmac->pc_to_device;
 647	}
 648
 649	if (load_address < 0)
 650		return load_address;
 651
 652	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
 653	dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
 654	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
 655	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
 656	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
 657	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
 658
 659	memset(context, 0, sizeof(*context));
 660	context->channel_state.pc = load_address;
 661
 662	/* Send by context the event mask,base address for peripheral
 663	 * and watermark level
 664	 */
 665	context->gReg[0] = sdmac->event_mask1;
 666	context->gReg[1] = sdmac->event_mask0;
 667	context->gReg[2] = sdmac->per_addr;
 668	context->gReg[6] = sdmac->shp_addr;
 669	context->gReg[7] = sdmac->watermark_level;
 670
 671	bd0->mode.command = C0_SETDM;
 672	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 673	bd0->mode.count = sizeof(*context) / 4;
 674	bd0->buffer_addr = sdma->context_phys;
 675	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
 676
 677	ret = sdma_run_channel(&sdma->channel[0]);
 678
 679	return ret;
 680}
 681
 682static void sdma_disable_channel(struct sdma_channel *sdmac)
 683{
 684	struct sdma_engine *sdma = sdmac->sdma;
 685	int channel = sdmac->channel;
 686
 687	__raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
 688	sdmac->status = DMA_ERROR;
 689}
 690
 691static int sdma_config_channel(struct sdma_channel *sdmac)
 692{
 693	int ret;
 694
 695	sdma_disable_channel(sdmac);
 696
 697	sdmac->event_mask0 = 0;
 698	sdmac->event_mask1 = 0;
 699	sdmac->shp_addr = 0;
 700	sdmac->per_addr = 0;
 701
 702	if (sdmac->event_id0) {
 703		if (sdmac->event_id0 > 32)
 704			return -EINVAL;
 705		sdma_event_enable(sdmac, sdmac->event_id0);
 706	}
 707
 708	switch (sdmac->peripheral_type) {
 709	case IMX_DMATYPE_DSP:
 710		sdma_config_ownership(sdmac, false, true, true);
 711		break;
 712	case IMX_DMATYPE_MEMORY:
 713		sdma_config_ownership(sdmac, false, true, false);
 714		break;
 715	default:
 716		sdma_config_ownership(sdmac, true, true, false);
 717		break;
 718	}
 719
 720	sdma_get_pc(sdmac, sdmac->peripheral_type);
 721
 722	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
 723			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
 724		/* Handle multiple event channels differently */
 725		if (sdmac->event_id1) {
 726			sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
 727			if (sdmac->event_id1 > 31)
 728				sdmac->watermark_level |= 1 << 31;
 729			sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
 730			if (sdmac->event_id0 > 31)
 731				sdmac->watermark_level |= 1 << 30;
 732		} else {
 733			sdmac->event_mask0 = 1 << sdmac->event_id0;
 734			sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
 735		}
 736		/* Watermark Level */
 737		sdmac->watermark_level |= sdmac->watermark_level;
 738		/* Address */
 739		sdmac->shp_addr = sdmac->per_address;
 740	} else {
 741		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
 742	}
 743
 744	ret = sdma_load_context(sdmac);
 745
 746	return ret;
 747}
 748
 749static int sdma_set_channel_priority(struct sdma_channel *sdmac,
 750		unsigned int priority)
 751{
 752	struct sdma_engine *sdma = sdmac->sdma;
 753	int channel = sdmac->channel;
 754
 755	if (priority < MXC_SDMA_MIN_PRIORITY
 756	    || priority > MXC_SDMA_MAX_PRIORITY) {
 757		return -EINVAL;
 758	}
 759
 760	__raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
 761
 762	return 0;
 763}
 764
 765static int sdma_request_channel(struct sdma_channel *sdmac)
 766{
 767	struct sdma_engine *sdma = sdmac->sdma;
 768	int channel = sdmac->channel;
 769	int ret = -EBUSY;
 770
 771	sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
 772	if (!sdmac->bd) {
 773		ret = -ENOMEM;
 774		goto out;
 775	}
 776
 777	memset(sdmac->bd, 0, PAGE_SIZE);
 778
 779	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
 780	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 781
 782	clk_enable(sdma->clk);
 783
 784	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
 785
 786	init_completion(&sdmac->done);
 787
 788	sdmac->buf_tail = 0;
 789
 790	return 0;
 791out:
 792
 793	return ret;
 794}
 795
 796static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 797{
 798	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 799}
 800
 801static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
 802{
 803	dma_cookie_t cookie = sdmac->chan.cookie;
 804
 805	if (++cookie < 0)
 806		cookie = 1;
 807
 808	sdmac->chan.cookie = cookie;
 809	sdmac->desc.cookie = cookie;
 810
 811	return cookie;
 812}
 813
 814static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 815{
 816	return container_of(chan, struct sdma_channel, chan);
 817}
 818
 819static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 820{
 821	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
 822	struct sdma_engine *sdma = sdmac->sdma;
 823	dma_cookie_t cookie;
 824
 825	spin_lock_irq(&sdmac->lock);
 826
 827	cookie = sdma_assign_cookie(sdmac);
 828
 829	sdma_enable_channel(sdma, sdmac->channel);
 830
 831	spin_unlock_irq(&sdmac->lock);
 832
 833	return cookie;
 834}
 835
 836static int sdma_alloc_chan_resources(struct dma_chan *chan)
 837{
 838	struct sdma_channel *sdmac = to_sdma_chan(chan);
 839	struct imx_dma_data *data = chan->private;
 840	int prio, ret;
 841
 842	if (!data)
 843		return -EINVAL;
 844
 845	switch (data->priority) {
 846	case DMA_PRIO_HIGH:
 847		prio = 3;
 848		break;
 849	case DMA_PRIO_MEDIUM:
 850		prio = 2;
 851		break;
 852	case DMA_PRIO_LOW:
 853	default:
 854		prio = 1;
 855		break;
 856	}
 857
 858	sdmac->peripheral_type = data->peripheral_type;
 859	sdmac->event_id0 = data->dma_request;
 860	ret = sdma_set_channel_priority(sdmac, prio);
 861	if (ret)
 862		return ret;
 863
 864	ret = sdma_request_channel(sdmac);
 865	if (ret)
 866		return ret;
 867
 868	dma_async_tx_descriptor_init(&sdmac->desc, chan);
 869	sdmac->desc.tx_submit = sdma_tx_submit;
 870	/* txd.flags will be overwritten in prep funcs */
 871	sdmac->desc.flags = DMA_CTRL_ACK;
 872
 873	return 0;
 874}
 875
 876static void sdma_free_chan_resources(struct dma_chan *chan)
 877{
 878	struct sdma_channel *sdmac = to_sdma_chan(chan);
 879	struct sdma_engine *sdma = sdmac->sdma;
 880
 881	sdma_disable_channel(sdmac);
 882
 883	if (sdmac->event_id0)
 884		sdma_event_disable(sdmac, sdmac->event_id0);
 885	if (sdmac->event_id1)
 886		sdma_event_disable(sdmac, sdmac->event_id1);
 887
 888	sdmac->event_id0 = 0;
 889	sdmac->event_id1 = 0;
 890
 891	sdma_set_channel_priority(sdmac, 0);
 892
 893	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
 894
 895	clk_disable(sdma->clk);
 896}
 897
 898static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 899		struct dma_chan *chan, struct scatterlist *sgl,
 900		unsigned int sg_len, enum dma_data_direction direction,
 901		unsigned long flags)
 902{
 903	struct sdma_channel *sdmac = to_sdma_chan(chan);
 904	struct sdma_engine *sdma = sdmac->sdma;
 905	int ret, i, count;
 906	int channel = sdmac->channel;
 907	struct scatterlist *sg;
 908
 909	if (sdmac->status == DMA_IN_PROGRESS)
 910		return NULL;
 911	sdmac->status = DMA_IN_PROGRESS;
 912
 913	sdmac->flags = 0;
 914
 915	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
 916			sg_len, channel);
 917
 918	sdmac->direction = direction;
 919	ret = sdma_load_context(sdmac);
 920	if (ret)
 921		goto err_out;
 922
 923	if (sg_len > NUM_BD) {
 924		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
 925				channel, sg_len, NUM_BD);
 926		ret = -EINVAL;
 927		goto err_out;
 928	}
 929
 930	for_each_sg(sgl, sg, sg_len, i) {
 931		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
 932		int param;
 933
 934		bd->buffer_addr = sg->dma_address;
 935
 936		count = sg->length;
 937
 938		if (count > 0xffff) {
 939			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
 940					channel, count, 0xffff);
 941			ret = -EINVAL;
 942			goto err_out;
 943		}
 944
 945		bd->mode.count = count;
 946
 947		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
 948			ret =  -EINVAL;
 949			goto err_out;
 950		}
 951
 952		switch (sdmac->word_size) {
 953		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 954			bd->mode.command = 0;
 955			if (count & 3 || sg->dma_address & 3)
 956				return NULL;
 957			break;
 958		case DMA_SLAVE_BUSWIDTH_2_BYTES:
 959			bd->mode.command = 2;
 960			if (count & 1 || sg->dma_address & 1)
 961				return NULL;
 962			break;
 963		case DMA_SLAVE_BUSWIDTH_1_BYTE:
 964			bd->mode.command = 1;
 965			break;
 966		default:
 967			return NULL;
 968		}
 969
 970		param = BD_DONE | BD_EXTD | BD_CONT;
 971
 972		if (i + 1 == sg_len) {
 973			param |= BD_INTR;
 974			param |= BD_LAST;
 975			param &= ~BD_CONT;
 976		}
 977
 978		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
 979				i, count, sg->dma_address,
 980				param & BD_WRAP ? "wrap" : "",
 981				param & BD_INTR ? " intr" : "");
 982
 983		bd->mode.status = param;
 984	}
 985
 986	sdmac->num_bd = sg_len;
 987	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 988
 989	return &sdmac->desc;
 990err_out:
 991	sdmac->status = DMA_ERROR;
 992	return NULL;
 993}
 994
 995static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 996		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 997		size_t period_len, enum dma_data_direction direction)
 998{
 999	struct sdma_channel *sdmac = to_sdma_chan(chan);
1000	struct sdma_engine *sdma = sdmac->sdma;
1001	int num_periods = buf_len / period_len;
1002	int channel = sdmac->channel;
1003	int ret, i = 0, buf = 0;
1004
1005	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1006
1007	if (sdmac->status == DMA_IN_PROGRESS)
1008		return NULL;
1009
1010	sdmac->status = DMA_IN_PROGRESS;
1011
1012	sdmac->flags |= IMX_DMA_SG_LOOP;
1013	sdmac->direction = direction;
1014	ret = sdma_load_context(sdmac);
1015	if (ret)
1016		goto err_out;
1017
1018	if (num_periods > NUM_BD) {
1019		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1020				channel, num_periods, NUM_BD);
1021		goto err_out;
1022	}
1023
1024	if (period_len > 0xffff) {
1025		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
1026				channel, period_len, 0xffff);
1027		goto err_out;
1028	}
1029
1030	while (buf < buf_len) {
1031		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1032		int param;
1033
1034		bd->buffer_addr = dma_addr;
1035
1036		bd->mode.count = period_len;
1037
1038		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1039			goto err_out;
1040		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1041			bd->mode.command = 0;
1042		else
1043			bd->mode.command = sdmac->word_size;
1044
1045		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1046		if (i + 1 == num_periods)
1047			param |= BD_WRAP;
1048
1049		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1050				i, period_len, dma_addr,
1051				param & BD_WRAP ? "wrap" : "",
1052				param & BD_INTR ? " intr" : "");
1053
1054		bd->mode.status = param;
1055
1056		dma_addr += period_len;
1057		buf += period_len;
1058
1059		i++;
1060	}
1061
1062	sdmac->num_bd = num_periods;
1063	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1064
1065	return &sdmac->desc;
1066err_out:
1067	sdmac->status = DMA_ERROR;
1068	return NULL;
1069}
1070
1071static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1072		unsigned long arg)
1073{
1074	struct sdma_channel *sdmac = to_sdma_chan(chan);
1075	struct dma_slave_config *dmaengine_cfg = (void *)arg;
1076
1077	switch (cmd) {
1078	case DMA_TERMINATE_ALL:
1079		sdma_disable_channel(sdmac);
1080		return 0;
1081	case DMA_SLAVE_CONFIG:
1082		if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
1083			sdmac->per_address = dmaengine_cfg->src_addr;
1084			sdmac->watermark_level = dmaengine_cfg->src_maxburst;
1085			sdmac->word_size = dmaengine_cfg->src_addr_width;
1086		} else {
1087			sdmac->per_address = dmaengine_cfg->dst_addr;
1088			sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
1089			sdmac->word_size = dmaengine_cfg->dst_addr_width;
1090		}
1091		return sdma_config_channel(sdmac);
1092	default:
1093		return -ENOSYS;
1094	}
1095
1096	return -EINVAL;
1097}
1098
1099static enum dma_status sdma_tx_status(struct dma_chan *chan,
1100					    dma_cookie_t cookie,
1101					    struct dma_tx_state *txstate)
1102{
1103	struct sdma_channel *sdmac = to_sdma_chan(chan);
1104	dma_cookie_t last_used;
1105
1106	last_used = chan->cookie;
1107
1108	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
1109
1110	return sdmac->status;
1111}
1112
1113static void sdma_issue_pending(struct dma_chan *chan)
1114{
1115	/*
1116	 * Nothing to do. We only have a single descriptor
1117	 */
1118}
1119
1120#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
1121
1122static void sdma_add_scripts(struct sdma_engine *sdma,
1123		const struct sdma_script_start_addrs *addr)
1124{
1125	s32 *addr_arr = (u32 *)addr;
1126	s32 *saddr_arr = (u32 *)sdma->script_addrs;
1127	int i;
1128
1129	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1130		if (addr_arr[i] > 0)
1131			saddr_arr[i] = addr_arr[i];
1132}
1133
1134static int __init sdma_get_firmware(struct sdma_engine *sdma,
1135		const char *fw_name)
1136{
1137	const struct firmware *fw;
1138	const struct sdma_firmware_header *header;
1139	int ret;
1140	const struct sdma_script_start_addrs *addr;
1141	unsigned short *ram_code;
1142
1143	ret = request_firmware(&fw, fw_name, sdma->dev);
1144	if (ret)
1145		return ret;
1146
1147	if (fw->size < sizeof(*header))
1148		goto err_firmware;
1149
1150	header = (struct sdma_firmware_header *)fw->data;
1151
1152	if (header->magic != SDMA_FIRMWARE_MAGIC)
1153		goto err_firmware;
1154	if (header->ram_code_start + header->ram_code_size > fw->size)
1155		goto err_firmware;
1156
1157	addr = (void *)header + header->script_addrs_start;
1158	ram_code = (void *)header + header->ram_code_start;
1159
1160	clk_enable(sdma->clk);
1161	/* download the RAM image for SDMA */
1162	sdma_load_script(sdma, ram_code,
1163			header->ram_code_size,
1164			addr->ram_code_start_addr);
1165	clk_disable(sdma->clk);
1166
1167	sdma_add_scripts(sdma, addr);
1168
1169	dev_info(sdma->dev, "loaded firmware %d.%d\n",
1170			header->version_major,
1171			header->version_minor);
1172
1173err_firmware:
1174	release_firmware(fw);
1175
1176	return ret;
1177}
1178
1179static int __init sdma_init(struct sdma_engine *sdma)
1180{
1181	int i, ret;
1182	dma_addr_t ccb_phys;
1183
1184	switch (sdma->devtype) {
1185	case IMX31_SDMA:
1186		sdma->num_events = 32;
1187		break;
1188	case IMX35_SDMA:
1189		sdma->num_events = 48;
1190		break;
1191	default:
1192		dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
1193			sdma->devtype);
1194		return -ENODEV;
1195	}
1196
1197	clk_enable(sdma->clk);
1198
1199	/* Be sure SDMA has not started yet */
1200	__raw_writel(0, sdma->regs + SDMA_H_C0PTR);
1201
1202	sdma->channel_control = dma_alloc_coherent(NULL,
1203			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1204			sizeof(struct sdma_context_data),
1205			&ccb_phys, GFP_KERNEL);
1206
1207	if (!sdma->channel_control) {
1208		ret = -ENOMEM;
1209		goto err_dma_alloc;
1210	}
1211
1212	sdma->context = (void *)sdma->channel_control +
1213		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1214	sdma->context_phys = ccb_phys +
1215		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1216
1217	/* Zero-out the CCB structures array just allocated */
1218	memset(sdma->channel_control, 0,
1219			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1220
1221	/* disable all channels */
1222	for (i = 0; i < sdma->num_events; i++)
1223		__raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
1224
1225	/* All channels have priority 0 */
1226	for (i = 0; i < MAX_DMA_CHANNELS; i++)
1227		__raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1228
1229	ret = sdma_request_channel(&sdma->channel[0]);
1230	if (ret)
1231		goto err_dma_alloc;
1232
1233	sdma_config_ownership(&sdma->channel[0], false, true, false);
1234
1235	/* Set Command Channel (Channel Zero) */
1236	__raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
1237
1238	/* Set bits of CONFIG register but with static context switching */
1239	/* FIXME: Check whether to set ACR bit depending on clock ratios */
1240	__raw_writel(0, sdma->regs + SDMA_H_CONFIG);
1241
1242	__raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1243
1244	/* Set bits of CONFIG register with given context switching mode */
1245	__raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1246
1247	/* Initializes channel's priorities */
1248	sdma_set_channel_priority(&sdma->channel[0], 7);
1249
1250	clk_disable(sdma->clk);
1251
1252	return 0;
1253
1254err_dma_alloc:
1255	clk_disable(sdma->clk);
1256	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1257	return ret;
1258}
1259
1260static int __init sdma_probe(struct platform_device *pdev)
1261{
1262	const struct of_device_id *of_id =
1263			of_match_device(sdma_dt_ids, &pdev->dev);
1264	struct device_node *np = pdev->dev.of_node;
1265	const char *fw_name;
1266	int ret;
1267	int irq;
1268	struct resource *iores;
1269	struct sdma_platform_data *pdata = pdev->dev.platform_data;
1270	int i;
1271	struct sdma_engine *sdma;
1272
1273	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1274	if (!sdma)
1275		return -ENOMEM;
1276
1277	sdma->dev = &pdev->dev;
1278
1279	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1280	irq = platform_get_irq(pdev, 0);
1281	if (!iores || irq < 0) {
1282		ret = -EINVAL;
1283		goto err_irq;
1284	}
1285
1286	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
1287		ret = -EBUSY;
1288		goto err_request_region;
1289	}
1290
1291	sdma->clk = clk_get(&pdev->dev, NULL);
1292	if (IS_ERR(sdma->clk)) {
1293		ret = PTR_ERR(sdma->clk);
1294		goto err_clk;
1295	}
1296
1297	sdma->regs = ioremap(iores->start, resource_size(iores));
1298	if (!sdma->regs) {
1299		ret = -ENOMEM;
1300		goto err_ioremap;
1301	}
1302
1303	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
1304	if (ret)
1305		goto err_request_irq;
1306
1307	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1308	if (!sdma->script_addrs) {
1309		ret = -ENOMEM;
1310		goto err_alloc;
1311	}
1312
1313	if (of_id)
1314		pdev->id_entry = of_id->data;
1315	sdma->devtype = pdev->id_entry->driver_data;
1316
1317	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1318	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1319
1320	INIT_LIST_HEAD(&sdma->dma_device.channels);
1321	/* Initialize channel parameters */
1322	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1323		struct sdma_channel *sdmac = &sdma->channel[i];
1324
1325		sdmac->sdma = sdma;
1326		spin_lock_init(&sdmac->lock);
1327
1328		sdmac->chan.device = &sdma->dma_device;
1329		sdmac->channel = i;
1330
1331		/*
1332		 * Add the channel to the DMAC list. Do not add channel 0 though
1333		 * because we need it internally in the SDMA driver. This also means
1334		 * that channel 0 in dmaengine counting matches sdma channel 1.
1335		 */
1336		if (i)
1337			list_add_tail(&sdmac->chan.device_node,
1338					&sdma->dma_device.channels);
1339	}
1340
1341	ret = sdma_init(sdma);
1342	if (ret)
1343		goto err_init;
1344
1345	if (pdata && pdata->script_addrs)
1346		sdma_add_scripts(sdma, pdata->script_addrs);
1347
1348	if (pdata) {
1349		sdma_get_firmware(sdma, pdata->fw_name);
1350	} else {
1351		/*
 1352		 * Because the device tree does not encode the ROM script address,
 1353		 * the RAM script in the firmware is mandatory for a device tree
 1354		 * probe; otherwise probing fails.
1355		 */
1356		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
1357					      &fw_name);
1358		if (ret) {
1359			dev_err(&pdev->dev, "failed to get firmware name\n");
1360			goto err_init;
1361		}
1362
1363		ret = sdma_get_firmware(sdma, fw_name);
1364		if (ret) {
1365			dev_err(&pdev->dev, "failed to get firmware\n");
1366			goto err_init;
1367		}
1368	}
1369
1370	sdma->dma_device.dev = &pdev->dev;
1371
1372	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
1373	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
1374	sdma->dma_device.device_tx_status = sdma_tx_status;
1375	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1376	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1377	sdma->dma_device.device_control = sdma_control;
1378	sdma->dma_device.device_issue_pending = sdma_issue_pending;
1379	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1380	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1381
1382	ret = dma_async_device_register(&sdma->dma_device);
1383	if (ret) {
1384		dev_err(&pdev->dev, "unable to register\n");
1385		goto err_init;
1386	}
1387
1388	dev_info(sdma->dev, "initialized\n");
1389
1390	return 0;
1391
1392err_init:
1393	kfree(sdma->script_addrs);
1394err_alloc:
1395	free_irq(irq, sdma);
1396err_request_irq:
1397	iounmap(sdma->regs);
1398err_ioremap:
1399	clk_put(sdma->clk);
1400err_clk:
1401	release_mem_region(iores->start, resource_size(iores));
1402err_request_region:
1403err_irq:
1404	kfree(sdma);
1405	return ret;
1406}
1407
1408static int __exit sdma_remove(struct platform_device *pdev)
1409{
1410	return -EBUSY;
1411}
1412
1413static struct platform_driver sdma_driver = {
1414	.driver		= {
1415		.name	= "imx-sdma",
1416		.of_match_table = sdma_dt_ids,
1417	},
1418	.id_table	= sdma_devtypes,
1419	.remove		= __exit_p(sdma_remove),
1420};
1421
1422static int __init sdma_module_init(void)
1423{
1424	return platform_driver_probe(&sdma_driver, sdma_probe);
1425}
1426module_init(sdma_module_init);
1427
1428MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1429MODULE_DESCRIPTION("i.MX SDMA driver");
1430MODULE_LICENSE("GPL");
v6.2
   1// SPDX-License-Identifier: GPL-2.0+
   2//
   3// drivers/dma/imx-sdma.c
   4//
   5// This file contains a driver for the Freescale Smart DMA engine
   6//
   7// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
   8//
   9// Based on code from Freescale:
  10//
  11// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
  12
  13#include <linux/init.h>
  14#include <linux/iopoll.h>
  15#include <linux/module.h>
  16#include <linux/types.h>
  17#include <linux/bitfield.h>
  18#include <linux/bitops.h>
  19#include <linux/mm.h>
  20#include <linux/interrupt.h>
  21#include <linux/clk.h>
  22#include <linux/delay.h>
  23#include <linux/sched.h>
  24#include <linux/semaphore.h>
  25#include <linux/spinlock.h>
  26#include <linux/device.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/firmware.h>
  29#include <linux/slab.h>
  30#include <linux/platform_device.h>
  31#include <linux/dmaengine.h>
  32#include <linux/of.h>
  33#include <linux/of_address.h>
  34#include <linux/of_device.h>
  35#include <linux/of_dma.h>
  36#include <linux/workqueue.h>
  37
  38#include <asm/irq.h>
  39#include <linux/dma/imx-dma.h>
  40#include <linux/regmap.h>
  41#include <linux/mfd/syscon.h>
  42#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  43
  44#include "dmaengine.h"
  45#include "virt-dma.h"
  46
  47/* SDMA registers */
  48#define SDMA_H_C0PTR		0x000
  49#define SDMA_H_INTR		0x004
  50#define SDMA_H_STATSTOP		0x008
  51#define SDMA_H_START		0x00c
  52#define SDMA_H_EVTOVR		0x010
  53#define SDMA_H_DSPOVR		0x014
  54#define SDMA_H_HOSTOVR		0x018
  55#define SDMA_H_EVTPEND		0x01c
  56#define SDMA_H_DSPENBL		0x020
  57#define SDMA_H_RESET		0x024
  58#define SDMA_H_EVTERR		0x028
  59#define SDMA_H_INTRMSK		0x02c
  60#define SDMA_H_PSW		0x030
  61#define SDMA_H_EVTERRDBG	0x034
  62#define SDMA_H_CONFIG		0x038
  63#define SDMA_ONCE_ENB		0x040
  64#define SDMA_ONCE_DATA		0x044
  65#define SDMA_ONCE_INSTR		0x048
  66#define SDMA_ONCE_STAT		0x04c
  67#define SDMA_ONCE_CMD		0x050
  68#define SDMA_EVT_MIRROR		0x054
  69#define SDMA_ILLINSTADDR	0x058
  70#define SDMA_CHN0ADDR		0x05c
  71#define SDMA_ONCE_RTB		0x060
  72#define SDMA_XTRIG_CONF1	0x070
  73#define SDMA_XTRIG_CONF2	0x074
  74#define SDMA_CHNENBL0_IMX35	0x200
  75#define SDMA_CHNENBL0_IMX31	0x080
  76#define SDMA_CHNPRI_0		0x100
  77#define SDMA_DONE0_CONFIG	0x1000
  78
  79/*
  80 * Buffer descriptor status values.
  81 */
  82#define BD_DONE  0x01
  83#define BD_WRAP  0x02
  84#define BD_CONT  0x04
  85#define BD_INTR  0x08
  86#define BD_RROR  0x10
  87#define BD_LAST  0x20
  88#define BD_EXTD  0x80
  89
  90/*
  91 * Data Node descriptor status values.
  92 */
  93#define DND_END_OF_FRAME  0x80
  94#define DND_END_OF_XFER   0x40
  95#define DND_DONE          0x20
  96#define DND_UNUSED        0x01
  97
  98/*
  99 * IPCV2 descriptor status values.
 100 */
 101#define BD_IPCV2_END_OF_FRAME  0x40
 102
 103#define IPCV2_MAX_NODES        50
 104/*
 105 * Error bit set in the CCB status field by the SDMA,
 106 * in setbd routine, in case of a transfer error
 107 */
 108#define DATA_ERROR  0x10000000
 109
 110/*
 111 * Buffer descriptor commands.
 112 */
 113#define C0_ADDR             0x01
 114#define C0_LOAD             0x02
 115#define C0_DUMP             0x03
 116#define C0_SETCTX           0x07
 117#define C0_GETCTX           0x03
 118#define C0_SETDM            0x01
 119#define C0_SETPM            0x04
 120#define C0_GETDM            0x02
 121#define C0_GETPM            0x08
 122/*
 123 * Change endianness indicator in the BD command field
 124 */
 125#define CHANGE_ENDIANNESS   0x80
 126
 127/*
 128 *  p_2_p watermark_level description
 129 *	Bits		Name			Description
 130 *	0-7		Lower WML		Lower watermark level
 131 *	8		PS			1: Pad Swallowing
 132 *						0: No Pad Swallowing
 133 *	9		PA			1: Pad Adding
 134 *						0: No Pad Adding
 135 *	10		SPDIF			If this bit is set both source
 136 *						and destination are on SPBA
 137 *	11		Source Bit(SP)		1: Source on SPBA
 138 *						0: Source on AIPS
 139 *	12		Destination Bit(DP)	1: Destination on SPBA
 140 *						0: Destination on AIPS
 141 *	13-15		---------		MUST BE 0
 142 *	16-23		Higher WML		HWML
 143 *	24-27		N			Total number of samples after
 144 *						which Pad adding/Swallowing
 145 *						must be done. It must be odd.
 146 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 147 *						LWML event mask
 148 *						0: LWE in EVENTS register
 149 *						1: LWE in EVENTS2 register
 150 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 151 *						HWML event mask
 152 *						0: HWE in EVENTS register
 153 *						1: HWE in EVENTS2 register
 154 *	30		---------		MUST BE 0
 155 *	31		CONT			1: Amount of samples to be
 156 *						transferred is unknown and
 157 *						script will keep on
 158 *						transferring samples as long as
 159 *						both events are detected and
 160 *						script must be manually stopped
 161 *						by the application
 162 *						0: The amount of samples to be
 163 *						transferred is equal to the
 164 *						count field of mode word
 165 */
 166#define SDMA_WATERMARK_LEVEL_LWML	0xFF
 167#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
 168#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
 169#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
 170#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
 171#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
 172#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
 173#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
 174#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
 175#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
 176
 177#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 178				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 179				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 180
 181#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
 182				 BIT(DMA_MEM_TO_DEV) | \
 183				 BIT(DMA_DEV_TO_DEV))
 184
 185#define SDMA_WATERMARK_LEVEL_N_FIFOS	GENMASK(15, 12)
 186#define SDMA_WATERMARK_LEVEL_OFF_FIFOS  GENMASK(19, 16)
 187#define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO   GENMASK(31, 28)
 188#define SDMA_WATERMARK_LEVEL_SW_DONE	BIT(23)
 189
 190#define SDMA_DONE0_CONFIG_DONE_SEL	BIT(7)
 191#define SDMA_DONE0_CONFIG_DONE_DIS	BIT(6)
 192
 193/*
 194 * struct sdma_script_start_addrs - SDMA script start pointers
 195 *
 196 * start addresses of the different functions in the physical
 197 * address space of the SDMA engine.
 198 */
 199struct sdma_script_start_addrs {
 200	s32 ap_2_ap_addr;
 201	s32 ap_2_bp_addr;
 202	s32 ap_2_ap_fixed_addr;
 203	s32 bp_2_ap_addr;
 204	s32 loopback_on_dsp_side_addr;
 205	s32 mcu_interrupt_only_addr;
 206	s32 firi_2_per_addr;
 207	s32 firi_2_mcu_addr;
 208	s32 per_2_firi_addr;
 209	s32 mcu_2_firi_addr;
 210	s32 uart_2_per_addr;
 211	s32 uart_2_mcu_addr;
 212	s32 per_2_app_addr;
 213	s32 mcu_2_app_addr;
 214	s32 per_2_per_addr;
 215	s32 uartsh_2_per_addr;
 216	s32 uartsh_2_mcu_addr;
 217	s32 per_2_shp_addr;
 218	s32 mcu_2_shp_addr;
 219	s32 ata_2_mcu_addr;
 220	s32 mcu_2_ata_addr;
 221	s32 app_2_per_addr;
 222	s32 app_2_mcu_addr;
 223	s32 shp_2_per_addr;
 224	s32 shp_2_mcu_addr;
 225	s32 mshc_2_mcu_addr;
 226	s32 mcu_2_mshc_addr;
 227	s32 spdif_2_mcu_addr;
 228	s32 mcu_2_spdif_addr;
 229	s32 asrc_2_mcu_addr;
 230	s32 ext_mem_2_ipu_addr;
 231	s32 descrambler_addr;
 232	s32 dptc_dvfs_addr;
 233	s32 utra_addr;
 234	s32 ram_code_start_addr;
 235	/* End of v1 array */
 236	s32 mcu_2_ssish_addr;
 237	s32 ssish_2_mcu_addr;
 238	s32 hdmi_dma_addr;
 239	/* End of v2 array */
 240	s32 zcanfd_2_mcu_addr;
 241	s32 zqspi_2_mcu_addr;
 242	s32 mcu_2_ecspi_addr;
 243	s32 mcu_2_sai_addr;
 244	s32 sai_2_mcu_addr;
 245	s32 uart_2_mcu_rom_addr;
 246	s32 uartsh_2_mcu_rom_addr;
 247	/* End of v3 array */
 248	s32 mcu_2_zqspi_addr;
 249	/* End of v4 array */
 250};
 251
 252/*
 253 * Mode/Count of data node descriptors - IPCv2
 254 */
 255struct sdma_mode_count {
 256#define SDMA_BD_MAX_CNT	0xffff
 257	u32 count   : 16; /* size of the buffer pointed by this BD */
 258	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
 259	u32 command :  8; /* command mostly used for channel 0 */
 260};
 261
 262/*
 263 * Buffer descriptor
 264 */
 265struct sdma_buffer_descriptor {
 266	struct sdma_mode_count  mode;
 267	u32 buffer_addr;	/* address of the buffer described */
 268	u32 ext_buffer_addr;	/* extended buffer address */
 269} __attribute__ ((packed));
 270
 271/**
 272 * struct sdma_channel_control - Channel control Block
 273 *
 274 * @current_bd_ptr:	current buffer descriptor processed
 275 * @base_bd_ptr:	first element of buffer descriptor array
 276 * @unused:		padding. The SDMA engine expects an array of 128 byte
 277 *			control blocks
 278 */
 279struct sdma_channel_control {
 280	u32 current_bd_ptr;
 281	u32 base_bd_ptr;
 282	u32 unused[2];
 283} __attribute__ ((packed));
 284
 285/**
 286 * struct sdma_state_registers - SDMA context for a channel
 287 *
 288 * @pc:		program counter
 289 * @unused1:	unused
 290 * @t:		test bit: status of arithmetic & test instruction
 291 * @rpc:	return program counter
 292 * @unused0:	unused
 293 * @sf:		source fault while loading data
 294 * @spc:	loop start program counter
 295 * @unused2:	unused
 296 * @df:		destination fault while storing data
 297 * @epc:	loop end program counter
 298 * @lm:		loop mode
 299 */
 300struct sdma_state_registers {
 301	u32 pc     :14;
 302	u32 unused1: 1;
 303	u32 t      : 1;
 304	u32 rpc    :14;
 305	u32 unused0: 1;
 306	u32 sf     : 1;
 307	u32 spc    :14;
 308	u32 unused2: 1;
 309	u32 df     : 1;
 310	u32 epc    :14;
 311	u32 lm     : 2;
 312} __attribute__ ((packed));
 313
 314/**
 315 * struct sdma_context_data - sdma context specific to a channel
 316 *
 317 * @channel_state:	channel state bits
 318 * @gReg:		general registers
 319 * @mda:		burst dma destination address register
 320 * @msa:		burst dma source address register
 321 * @ms:			burst dma status register
 322 * @md:			burst dma data register
 323 * @pda:		peripheral dma destination address register
 324 * @psa:		peripheral dma source address register
 325 * @ps:			peripheral dma status register
 326 * @pd:			peripheral dma data register
 327 * @ca:			CRC polynomial register
 328 * @cs:			CRC accumulator register
 329 * @dda:		dedicated core destination address register
 330 * @dsa:		dedicated core source address register
 331 * @ds:			dedicated core status register
 332 * @dd:			dedicated core data register
 333 * @scratch0:		1st word of dedicated ram for context switch
 334 * @scratch1:		2nd word of dedicated ram for context switch
 335 * @scratch2:		3rd word of dedicated ram for context switch
 336 * @scratch3:		4th word of dedicated ram for context switch
 337 * @scratch4:		5th word of dedicated ram for context switch
 338 * @scratch5:		6th word of dedicated ram for context switch
 339 * @scratch6:		7th word of dedicated ram for context switch
 340 * @scratch7:		8th word of dedicated ram for context switch
 341 */
 342struct sdma_context_data {
 343	struct sdma_state_registers  channel_state;
 344	u32  gReg[8];
 345	u32  mda;
 346	u32  msa;
 347	u32  ms;
 348	u32  md;
 349	u32  pda;
 350	u32  psa;
 351	u32  ps;
 352	u32  pd;
 353	u32  ca;
 354	u32  cs;
 355	u32  dda;
 356	u32  dsa;
 357	u32  ds;
 358	u32  dd;
 359	u32  scratch0;
 360	u32  scratch1;
 361	u32  scratch2;
 362	u32  scratch3;
 363	u32  scratch4;
 364	u32  scratch5;
 365	u32  scratch6;
 366	u32  scratch7;
 367} __attribute__ ((packed));
 368
 369
 370struct sdma_engine;
 371
 372/**
  373 * struct sdma_desc - descriptor structure for one transfer
 374 * @vd:			descriptor for virt dma
 375 * @num_bd:		number of descriptors currently handling
 376 * @bd_phys:		physical address of bd
 377 * @buf_tail:		ID of the buffer that was processed
 378 * @buf_ptail:		ID of the previous buffer that was processed
 379 * @period_len:		period length, used in cyclic.
 380 * @chn_real_count:	the real count updated from bd->mode.count
 381 * @chn_count:		the transfer count set
 382 * @sdmac:		sdma_channel pointer
 383 * @bd:			pointer of allocate bd
 384 */
 385struct sdma_desc {
 386	struct virt_dma_desc	vd;
 387	unsigned int		num_bd;
 388	dma_addr_t		bd_phys;
 389	unsigned int		buf_tail;
 390	unsigned int		buf_ptail;
 391	unsigned int		period_len;
 392	unsigned int		chn_real_count;
 393	unsigned int		chn_count;
 394	struct sdma_channel	*sdmac;
 395	struct sdma_buffer_descriptor *bd;
 396};
 397
 398/**
 399 * struct sdma_channel - housekeeping for a SDMA channel
 400 *
 401 * @vc:			virt_dma base structure
 402 * @desc:		sdma description including vd and other special member
 403 * @sdma:		pointer to the SDMA engine for this channel
 404 * @channel:		the channel number, matches dmaengine chan_id + 1
 405 * @direction:		transfer type. Needed for setting SDMA script
 406 * @slave_config:	Slave configuration
 407 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
 408 * @event_id0:		aka dma request line
 409 * @event_id1:		for channels that use 2 events
 410 * @word_size:		peripheral access size
  411 * @pc_from_device:	script address for device_2_memory transfers
  412 * @pc_to_device:	script address for memory_2_device transfers
  413 * @device_to_device:	script address for device_2_device transfers
  414 * @pc_to_pc:		script address for memory_2_memory transfers
 415 * @flags:		loop mode or not
  416 * @per_address:	peripheral source or destination address in the common
  417 *                      case; destination address in the p_2_p case
  418 * @per_address2:	peripheral source address in the p_2_p case
 419 * @event_mask:		event mask used in p_2_p script
  420 * @watermark_level:	value for gReg[7]; some scripts (e.g. p_2_p) extend
  421 *			it beyond the basic watermark
 422 * @shp_addr:		value for gReg[6]
 423 * @per_addr:		value for gReg[2]
 424 * @status:		status of dma channel
 426 * @data:		specific sdma interface structure
 428 * @terminate_worker:	used to call back into terminate work function
 429 * @terminated:		terminated list
 430 * @is_ram_script:	flag for script in ram
 431 * @n_fifos_src:	number of source device fifos
 432 * @n_fifos_dst:	number of destination device fifos
 433 * @sw_done:		software done flag
 434 * @stride_fifos_src:	stride for source device FIFOs
 435 * @stride_fifos_dst:	stride for destination device FIFOs
  436 * @words_per_fifo:	number of words to copy per FIFO in one transfer
 437 */
 438struct sdma_channel {
 439	struct virt_dma_chan		vc;
 440	struct sdma_desc		*desc;
 441	struct sdma_engine		*sdma;
 442	unsigned int			channel;
 443	enum dma_transfer_direction		direction;
 444	struct dma_slave_config		slave_config;
 445	enum sdma_peripheral_type	peripheral_type;
 446	unsigned int			event_id0;
 447	unsigned int			event_id1;
 448	enum dma_slave_buswidth		word_size;
 449	unsigned int			pc_from_device, pc_to_device;
 450	unsigned int			device_to_device;
 451	unsigned int                    pc_to_pc;
 452	unsigned long			flags;
 453	dma_addr_t			per_address, per_address2;
 454	unsigned long			event_mask[2];
 455	unsigned long			watermark_level;
 456	u32				shp_addr, per_addr;
 457	enum dma_status			status;
 458	struct imx_dma_data		data;
 459	struct work_struct		terminate_worker;
 460	struct list_head                terminated;
 461	bool				is_ram_script;
 462	unsigned int			n_fifos_src;
 463	unsigned int			n_fifos_dst;
 464	unsigned int			stride_fifos_src;
 465	unsigned int			stride_fifos_dst;
 466	unsigned int			words_per_fifo;
 467	bool				sw_done;
 468};
 469
 470#define IMX_DMA_SG_LOOP		BIT(0)
 471
 472#define MAX_DMA_CHANNELS 32
 473#define MXC_SDMA_DEFAULT_PRIORITY 1
 474#define MXC_SDMA_MIN_PRIORITY 1
 475#define MXC_SDMA_MAX_PRIORITY 7
 476
 477#define SDMA_FIRMWARE_MAGIC 0x414d4453
 478
 479/**
 480 * struct sdma_firmware_header - Layout of the firmware image
 481 *
 482 * @magic:		"SDMA"
 483 * @version_major:	increased whenever layout of struct
 484 *			sdma_script_start_addrs changes.
 485 * @version_minor:	firmware minor version (for binary compatible changes)
 486 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 487 * @num_script_addrs:	Number of script addresses in this image
 488 * @ram_code_start:	offset of SDMA ram image in this firmware image
 489 * @ram_code_size:	size of SDMA ram image
 490 * @script_addrs:	Stores the start address of the SDMA scripts
 491 *			(in SDMA memory space)
 492 */
 493struct sdma_firmware_header {
 494	u32	magic;
 495	u32	version_major;
 496	u32	version_minor;
 497	u32	script_addrs_start;
 498	u32	num_script_addrs;
 499	u32	ram_code_start;
 500	u32	ram_code_size;
 501};
 502
 503struct sdma_driver_data {
 504	int chnenbl0;
 505	int num_events;
 506	struct sdma_script_start_addrs	*script_addrs;
 507	bool check_ratio;
  508	/*
  509	 * The ERR009165 eCSPI erratum has to be worked around in the SDMA
  510	 * script; it is fixed in silicon starting with the i.MX6UL.
  511	 * See the link below for more information:
  512	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
  513	 */
 514	bool ecspi_fixed;
 515};
 516
 517struct sdma_engine {
 518	struct device			*dev;
 519	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 520	struct sdma_channel_control	*channel_control;
 521	void __iomem			*regs;
 522	struct sdma_context_data	*context;
 523	dma_addr_t			context_phys;
 524	struct dma_device		dma_device;
 525	struct clk			*clk_ipg;
 526	struct clk			*clk_ahb;
 527	spinlock_t			channel_0_lock;
 528	u32				script_number;
 529	struct sdma_script_start_addrs	*script_addrs;
 530	const struct sdma_driver_data	*drvdata;
 531	u32				spba_start_addr;
 532	u32				spba_end_addr;
 533	unsigned int			irq;
 534	dma_addr_t			bd0_phys;
 535	struct sdma_buffer_descriptor	*bd0;
  536	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
 537	bool				clk_ratio;
 538	bool                            fw_loaded;
 539};
 540
 541static int sdma_config_write(struct dma_chan *chan,
 542		       struct dma_slave_config *dmaengine_cfg,
 543		       enum dma_transfer_direction direction);
 544
 545static struct sdma_driver_data sdma_imx31 = {
 546	.chnenbl0 = SDMA_CHNENBL0_IMX31,
 547	.num_events = 32,
 548};
 549
 550static struct sdma_script_start_addrs sdma_script_imx25 = {
 551	.ap_2_ap_addr = 729,
 552	.uart_2_mcu_addr = 904,
 553	.per_2_app_addr = 1255,
 554	.mcu_2_app_addr = 834,
 555	.uartsh_2_mcu_addr = 1120,
 556	.per_2_shp_addr = 1329,
 557	.mcu_2_shp_addr = 1048,
 558	.ata_2_mcu_addr = 1560,
 559	.mcu_2_ata_addr = 1479,
 560	.app_2_per_addr = 1189,
 561	.app_2_mcu_addr = 770,
 562	.shp_2_per_addr = 1407,
 563	.shp_2_mcu_addr = 979,
 564};
 565
 566static struct sdma_driver_data sdma_imx25 = {
 567	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 568	.num_events = 48,
 569	.script_addrs = &sdma_script_imx25,
 570};
 571
 572static struct sdma_driver_data sdma_imx35 = {
 573	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 574	.num_events = 48,
 575};
 576
 577static struct sdma_script_start_addrs sdma_script_imx51 = {
 578	.ap_2_ap_addr = 642,
 579	.uart_2_mcu_addr = 817,
 580	.mcu_2_app_addr = 747,
 581	.mcu_2_shp_addr = 961,
 582	.ata_2_mcu_addr = 1473,
 583	.mcu_2_ata_addr = 1392,
 584	.app_2_per_addr = 1033,
 585	.app_2_mcu_addr = 683,
 586	.shp_2_per_addr = 1251,
 587	.shp_2_mcu_addr = 892,
 588};
 589
 590static struct sdma_driver_data sdma_imx51 = {
 591	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 592	.num_events = 48,
 593	.script_addrs = &sdma_script_imx51,
 594};
 595
 596static struct sdma_script_start_addrs sdma_script_imx53 = {
 597	.ap_2_ap_addr = 642,
 598	.app_2_mcu_addr = 683,
 599	.mcu_2_app_addr = 747,
 600	.uart_2_mcu_addr = 817,
 601	.shp_2_mcu_addr = 891,
 602	.mcu_2_shp_addr = 960,
 603	.uartsh_2_mcu_addr = 1032,
 604	.spdif_2_mcu_addr = 1100,
 605	.mcu_2_spdif_addr = 1134,
 606	.firi_2_mcu_addr = 1193,
 607	.mcu_2_firi_addr = 1290,
 608};
 609
 610static struct sdma_driver_data sdma_imx53 = {
 611	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 612	.num_events = 48,
 613	.script_addrs = &sdma_script_imx53,
 614};
 615
 616static struct sdma_script_start_addrs sdma_script_imx6q = {
 617	.ap_2_ap_addr = 642,
 618	.uart_2_mcu_addr = 817,
 619	.mcu_2_app_addr = 747,
 620	.per_2_per_addr = 6331,
 621	.uartsh_2_mcu_addr = 1032,
 622	.mcu_2_shp_addr = 960,
 623	.app_2_mcu_addr = 683,
 624	.shp_2_mcu_addr = 891,
 625	.spdif_2_mcu_addr = 1100,
 626	.mcu_2_spdif_addr = 1134,
 627};
 628
 629static struct sdma_driver_data sdma_imx6q = {
 630	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 631	.num_events = 48,
 632	.script_addrs = &sdma_script_imx6q,
 633};
 634
 635static struct sdma_driver_data sdma_imx6ul = {
 636	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 637	.num_events = 48,
 638	.script_addrs = &sdma_script_imx6q,
 639	.ecspi_fixed = true,
 640};
 641
 642static struct sdma_script_start_addrs sdma_script_imx7d = {
 643	.ap_2_ap_addr = 644,
 644	.uart_2_mcu_addr = 819,
 645	.mcu_2_app_addr = 749,
 646	.uartsh_2_mcu_addr = 1034,
 647	.mcu_2_shp_addr = 962,
 648	.app_2_mcu_addr = 685,
 649	.shp_2_mcu_addr = 893,
 650	.spdif_2_mcu_addr = 1102,
 651	.mcu_2_spdif_addr = 1136,
 652};
 653
 654static struct sdma_driver_data sdma_imx7d = {
 655	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 656	.num_events = 48,
 657	.script_addrs = &sdma_script_imx7d,
 658};
 659
 660static struct sdma_driver_data sdma_imx8mq = {
 661	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 662	.num_events = 48,
 663	.script_addrs = &sdma_script_imx7d,
 664	.check_ratio = 1,
 665};
 666
 667static const struct of_device_id sdma_dt_ids[] = {
 668	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
 669	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
 670	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
 671	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
 672	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 673	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 674	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
 675	{ .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
 676	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
 677	{ /* sentinel */ }
 678};
 679MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 680
 681#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
 682#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
 683#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
 684#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected*/
 685
 686static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 687{
 688	u32 chnenbl0 = sdma->drvdata->chnenbl0;
 689	return chnenbl0 + event * 4;
 690}
 691
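/*
 * Select which of the DMA events, the ARM host and the DSP may own/start
 * this channel by updating the channel's bit in the EVTOVR, HOSTOVR and
 * DSPOVR override registers; asking for all three overrides at once is
 * rejected.
 */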
 692static int sdma_config_ownership(struct sdma_channel *sdmac,
 693		bool event_override, bool mcu_override, bool dsp_override)
 694{
 695	struct sdma_engine *sdma = sdmac->sdma;
 696	int channel = sdmac->channel;
 697	unsigned long evt, mcu, dsp;
 698
 699	if (event_override && mcu_override && dsp_override)
 700		return -EINVAL;
 701
 702	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
 703	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
 704	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
 705
 706	if (dsp_override)
 707		__clear_bit(channel, &dsp);
 708	else
 709		__set_bit(channel, &dsp);
 710
 711	if (event_override)
 712		__clear_bit(channel, &evt);
 713	else
 714		__set_bit(channel, &evt);
 715
 716	if (mcu_override)
 717		__clear_bit(channel, &mcu);
 718	else
 719		__set_bit(channel, &mcu);
 720
 721	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
 722	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
 723	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
 724
 725	return 0;
 726}
 727
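/*
 * A set bit read back from SDMA_H_STATSTOP means the channel is currently
 * enabled; writing a 1 to the same register (see sdma_disable_channel())
 * stops the channel.
 */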
 728static int is_sdma_channel_enabled(struct sdma_engine *sdma, int channel)
 729{
 730	return !!(readl(sdma->regs + SDMA_H_STATSTOP) & BIT(channel));
 731}
 732
 733static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 734{
 735	writel(BIT(channel), sdma->regs + SDMA_H_START);
 736}
 737
 738/*
 739 * sdma_run_channel0 - run a channel and wait till it's done
 740 */
 741static int sdma_run_channel0(struct sdma_engine *sdma)
 742{
 743	int ret;
 744	u32 reg;
 745
 746	sdma_enable_channel(sdma, 0);
 747
 748	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
 749						reg, !(reg & 1), 1, 500);
 750	if (ret)
 751		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 752
 753	/* Set bits of CONFIG register with dynamic context switching */
 754	reg = readl(sdma->regs + SDMA_H_CONFIG);
 755	if ((reg & SDMA_H_CONFIG_CSM) == 0) {
 756		reg |= SDMA_H_CONFIG_CSM;
 757		writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
 758	}
 759
 760	return ret;
 761}
 762
 763static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 764		u32 address)
 765{
 766	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 767	void *buf_virt;
 768	dma_addr_t buf_phys;
 769	int ret;
 770	unsigned long flags;
 771
 772	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
 773	if (!buf_virt)
 774		return -ENOMEM;
 775
 776	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 777
 778	bd0->mode.command = C0_SETPM;
 779	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
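	/*
	 * C0_SETPM counts in 16-bit halfwords (the RAM code is an array of
	 * u16 SDMA instructions), hence size / 2 below.
	 */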
 780	bd0->mode.count = size / 2;
 781	bd0->buffer_addr = buf_phys;
 782	bd0->ext_buffer_addr = address;
 783
 784	memcpy(buf_virt, buf, size);
 785
 786	ret = sdma_run_channel0(sdma);
 787
 788	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 789
 790	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
 791
 792	return ret;
 793}
 794
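/*
 * Each DMA request (event) has its own 32-bit channel-enable register
 * CHNENBLn (see chnenbl_ofs()); setting this channel's bit there routes the
 * event to the channel.
 */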
 795static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 796{
 797	struct sdma_engine *sdma = sdmac->sdma;
 798	int channel = sdmac->channel;
 799	unsigned long val;
 800	u32 chnenbl = chnenbl_ofs(sdma, event);
 801
 802	val = readl_relaxed(sdma->regs + chnenbl);
 803	__set_bit(channel, &val);
 804	writel_relaxed(val, sdma->regs + chnenbl);
 805
  806	/* Set SDMA_DONEx_CONFIG if sw_done is enabled */
 807	if (sdmac->sw_done) {
 808		val = readl_relaxed(sdma->regs + SDMA_DONE0_CONFIG);
 809		val |= SDMA_DONE0_CONFIG_DONE_SEL;
 810		val &= ~SDMA_DONE0_CONFIG_DONE_DIS;
 811		writel_relaxed(val, sdma->regs + SDMA_DONE0_CONFIG);
 812	}
 813}
 814
 815static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 816{
 817	struct sdma_engine *sdma = sdmac->sdma;
 818	int channel = sdmac->channel;
 819	u32 chnenbl = chnenbl_ofs(sdma, event);
 820	unsigned long val;
 821
 822	val = readl_relaxed(sdma->regs + chnenbl);
 823	__clear_bit(channel, &val);
 824	writel_relaxed(val, sdma->regs + chnenbl);
 825}
 826
 827static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
 828{
 829	return container_of(t, struct sdma_desc, vd.tx);
 830}
 831
 832static void sdma_start_desc(struct sdma_channel *sdmac)
 833{
 834	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
 835	struct sdma_desc *desc;
 836	struct sdma_engine *sdma = sdmac->sdma;
 837	int channel = sdmac->channel;
 838
 839	if (!vd) {
 840		sdmac->desc = NULL;
 841		return;
 842	}
 843	sdmac->desc = desc = to_sdma_desc(&vd->tx);
 844
 845	list_del(&vd->node);
 846
 847	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 848	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
 849	sdma_enable_channel(sdma, sdmac->channel);
 850}
 851
 852static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 853{
 854	struct sdma_buffer_descriptor *bd;
 855	int error = 0;
 856	enum dma_status	old_status = sdmac->status;
 857
 858	/*
 859	 * loop mode. Iterate over descriptors, re-setup them and
 860	 * call callback function.
 861	 */
 862	while (sdmac->desc) {
 863		struct sdma_desc *desc = sdmac->desc;
 864
 865		bd = &desc->bd[desc->buf_tail];
 866
 867		if (bd->mode.status & BD_DONE)
 868			break;
 869
 870		if (bd->mode.status & BD_RROR) {
 871			bd->mode.status &= ~BD_RROR;
 872			sdmac->status = DMA_ERROR;
 873			error = -EIO;
 874		}
 875
  876		/*
  877		 * We use bd->mode.count to calculate the residue, since it contains
  878		 * the number of bytes present in the current buffer descriptor.
  879		 */
 880
 881		desc->chn_real_count = bd->mode.count;
 882		bd->mode.count = desc->period_len;
 883		desc->buf_ptail = desc->buf_tail;
 884		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
 885
 886		/*
 887		 * The callback is called from the interrupt context in order
 888		 * to reduce latency and to avoid the risk of altering the
 889		 * SDMA transaction status by the time the client tasklet is
 890		 * executed.
 891		 */
 892		spin_unlock(&sdmac->vc.lock);
 893		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
 894		spin_lock(&sdmac->vc.lock);
 895
 896		/* Assign buffer ownership to SDMA */
 897		bd->mode.status |= BD_DONE;
 898
 899		if (error)
 900			sdmac->status = old_status;
 901	}
 902
 903	/*
 904	 * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
 905	 * owned buffer is available (i.e. BD_DONE was set too late).
 906	 */
 907	if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
 908		dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
 909		sdma_enable_channel(sdmac->sdma, sdmac->channel);
 910	}
 911}
 912
 913static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 914{
 915	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 916	struct sdma_buffer_descriptor *bd;
 917	int i, error = 0;
 918
 919	sdmac->desc->chn_real_count = 0;
 920	/*
 921	 * non loop mode. Iterate over all descriptors, collect
 922	 * errors and call callback function
 923	 */
 924	for (i = 0; i < sdmac->desc->num_bd; i++) {
 925		bd = &sdmac->desc->bd[i];
 926
 927		if (bd->mode.status & (BD_DONE | BD_RROR))
 928			error = -EIO;
 929		sdmac->desc->chn_real_count += bd->mode.count;
 930	}
 931
 932	if (error)
 933		sdmac->status = DMA_ERROR;
 934	else
 935		sdmac->status = DMA_COMPLETE;
 936}
 937
 938static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 939{
 940	struct sdma_engine *sdma = dev_id;
 941	unsigned long stat;
 942
 943	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
 944	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 945	/* channel 0 is special and not handled here, see run_channel0() */
 946	stat &= ~1;
 947
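	/* Service the pending channels, highest channel number first (fls). */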
 948	while (stat) {
 949		int channel = fls(stat) - 1;
 950		struct sdma_channel *sdmac = &sdma->channel[channel];
 951		struct sdma_desc *desc;
 952
 953		spin_lock(&sdmac->vc.lock);
 954		desc = sdmac->desc;
 955		if (desc) {
 956			if (sdmac->flags & IMX_DMA_SG_LOOP) {
 957				sdma_update_channel_loop(sdmac);
 958			} else {
 959				mxc_sdma_handle_channel_normal(sdmac);
 960				vchan_cookie_complete(&desc->vd);
 961				sdma_start_desc(sdmac);
 962			}
 963		}
 964
 965		spin_unlock(&sdmac->vc.lock);
 966		__clear_bit(channel, &stat);
 967	}
 968
 969	return IRQ_HANDLED;
 970}
 971
 972/*
 973 * sets the pc of SDMA script according to the peripheral type
 974 */
 975static int sdma_get_pc(struct sdma_channel *sdmac,
 976		enum sdma_peripheral_type peripheral_type)
 977{
 978	struct sdma_engine *sdma = sdmac->sdma;
 979	int per_2_emi = 0, emi_2_per = 0;
 980	/*
 981	 * These are needed once we start to support transfers between
 982	 * two peripherals or memory-to-memory transfers
 983	 */
 984	int per_2_per = 0, emi_2_emi = 0;
 985
 986	sdmac->pc_from_device = 0;
 987	sdmac->pc_to_device = 0;
 988	sdmac->device_to_device = 0;
 989	sdmac->pc_to_pc = 0;
 990	sdmac->is_ram_script = false;
 991
 992	switch (peripheral_type) {
 993	case IMX_DMATYPE_MEMORY:
 994		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 995		break;
 996	case IMX_DMATYPE_DSP:
 997		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
 998		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
 999		break;
1000	case IMX_DMATYPE_FIRI:
1001		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
1002		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
1003		break;
1004	case IMX_DMATYPE_UART:
1005		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
1006		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1007		break;
1008	case IMX_DMATYPE_UART_SP:
1009		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
1010		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1011		break;
1012	case IMX_DMATYPE_ATA:
1013		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
1014		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
1015		break;
1016	case IMX_DMATYPE_CSPI:
1017		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
1018
1019		/* Use rom script mcu_2_app if ERR009165 fixed */
1020		if (sdmac->sdma->drvdata->ecspi_fixed) {
1021			emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1022		} else {
1023			emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
1024			sdmac->is_ram_script = true;
1025		}
1026
1027		break;
1028	case IMX_DMATYPE_EXT:
1029	case IMX_DMATYPE_SSI:
1030	case IMX_DMATYPE_SAI:
1031		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
1032		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1033		break;
1034	case IMX_DMATYPE_SSI_DUAL:
1035		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
1036		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
1037		sdmac->is_ram_script = true;
1038		break;
1039	case IMX_DMATYPE_SSI_SP:
1040	case IMX_DMATYPE_MMC:
1041	case IMX_DMATYPE_SDHC:
1042	case IMX_DMATYPE_CSPI_SP:
1043	case IMX_DMATYPE_ESAI:
1044	case IMX_DMATYPE_MSHC_SP:
1045		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
1046		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1047		break;
1048	case IMX_DMATYPE_ASRC:
1049		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
1050		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
1051		per_2_per = sdma->script_addrs->per_2_per_addr;
1052		sdmac->is_ram_script = true;
1053		break;
1054	case IMX_DMATYPE_ASRC_SP:
1055		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
1056		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1057		per_2_per = sdma->script_addrs->per_2_per_addr;
1058		break;
1059	case IMX_DMATYPE_MSHC:
1060		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
1061		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
1062		break;
1063	case IMX_DMATYPE_CCM:
1064		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
1065		break;
1066	case IMX_DMATYPE_SPDIF:
1067		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
1068		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
1069		break;
1070	case IMX_DMATYPE_IPU_MEMORY:
1071		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
1072		break;
1073	case IMX_DMATYPE_MULTI_SAI:
1074		per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
1075		emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
1076		break;
1077	default:
1078		dev_err(sdma->dev, "Unsupported transfer type %d\n",
1079			peripheral_type);
1080		return -EINVAL;
1081	}
1082
1083	sdmac->pc_from_device = per_2_emi;
1084	sdmac->pc_to_device = emi_2_per;
1085	sdmac->device_to_device = per_2_per;
1086	sdmac->pc_to_pc = emi_2_emi;
1087
1088	return 0;
1089}
1090
1091static int sdma_load_context(struct sdma_channel *sdmac)
1092{
1093	struct sdma_engine *sdma = sdmac->sdma;
1094	int channel = sdmac->channel;
1095	int load_address;
1096	struct sdma_context_data *context = sdma->context;
1097	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
1098	int ret;
1099	unsigned long flags;
1100
1101	if (sdmac->direction == DMA_DEV_TO_MEM)
1102		load_address = sdmac->pc_from_device;
1103	else if (sdmac->direction == DMA_DEV_TO_DEV)
1104		load_address = sdmac->device_to_device;
1105	else if (sdmac->direction == DMA_MEM_TO_MEM)
1106		load_address = sdmac->pc_to_pc;
1107	else
1108		load_address = sdmac->pc_to_device;
1109
1110	if (load_address < 0)
1111		return load_address;
1112
1113	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
1114	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
1115	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
1116	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
1117	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
1118	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
1119
1120	spin_lock_irqsave(&sdma->channel_0_lock, flags);
1121
1122	memset(context, 0, sizeof(*context));
1123	context->channel_state.pc = load_address;
1124
 1125	/* Send the event mask, the peripheral base address and the
 1126	 * watermark level to the script via the channel context
 1127	 */
1128	context->gReg[0] = sdmac->event_mask[1];
1129	context->gReg[1] = sdmac->event_mask[0];
1130	context->gReg[2] = sdmac->per_addr;
1131	context->gReg[6] = sdmac->shp_addr;
1132	context->gReg[7] = sdmac->watermark_level;
1133
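	/*
	 * Write the prepared context into this channel's slot of the context
	 * area in SDMA internal RAM (starting at address 2048, as encoded in
	 * ext_buffer_addr below); C0_SETDM counts in 32-bit words.
	 */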
1134	bd0->mode.command = C0_SETDM;
1135	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
1136	bd0->mode.count = sizeof(*context) / 4;
1137	bd0->buffer_addr = sdma->context_phys;
1138	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
1139	ret = sdma_run_channel0(sdma);
1140
1141	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
1142
1143	return ret;
1144}
1145
1146static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
1147{
1148	return container_of(chan, struct sdma_channel, vc.chan);
1149}
1150
1151static int sdma_disable_channel(struct dma_chan *chan)
1152{
1153	struct sdma_channel *sdmac = to_sdma_chan(chan);
1154	struct sdma_engine *sdma = sdmac->sdma;
1155	int channel = sdmac->channel;
1156
1157	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
1158	sdmac->status = DMA_ERROR;
1159
1160	return 0;
1161}
1162static void sdma_channel_terminate_work(struct work_struct *work)
1163{
1164	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
1165						  terminate_worker);
 1166	/*
 1167	 * According to the NXP R&D team, a delay of the time the SDMA needs
 1168	 * to process one BD (at most 1ms) should be added after clearing the
 1169	 * channel enable bit, to ensure the SDMA core has really stopped
 1170	 * after SDMA clients call .device_terminate_all.
 1171	 */
1172	usleep_range(1000, 2000);
1173
1174	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->terminated);
1175}
1176
1177static int sdma_terminate_all(struct dma_chan *chan)
1178{
1179	struct sdma_channel *sdmac = to_sdma_chan(chan);
1180	unsigned long flags;
1181
1182	spin_lock_irqsave(&sdmac->vc.lock, flags);
1183
1184	sdma_disable_channel(chan);
1185
1186	if (sdmac->desc) {
1187		vchan_terminate_vdesc(&sdmac->desc->vd);
 1188		/*
 1189		 * Move the current descriptor onto the terminated list so that
 1190		 * it can be freed later in sdma_channel_terminate_work alone,
 1191		 * without the next descriptor potentially being started before
 1192		 * the last one has actually terminated.
 1193		 */
1194		vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
1195		sdmac->desc = NULL;
1196		schedule_work(&sdmac->terminate_worker);
1197	}
1198
1199	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1200
1201	return 0;
1202}
1203
1204static void sdma_channel_synchronize(struct dma_chan *chan)
1205{
1206	struct sdma_channel *sdmac = to_sdma_chan(chan);
1207
1208	vchan_synchronize(&sdmac->vc);
1209
1210	flush_work(&sdmac->terminate_worker);
1211}
1212
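/*
 * For peripheral-to-peripheral transfers the watermark value packs both burst
 * sizes: LWML (source) in the lower half and HWML (destination) in the upper
 * half, plus the extra event/SPBA flag bits set below.
 */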
1213static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
1214{
1215	struct sdma_engine *sdma = sdmac->sdma;
1216
1217	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
1218	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
1219
1220	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
1221	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
1222
1223	if (sdmac->event_id0 > 31)
1224		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
1225
1226	if (sdmac->event_id1 > 31)
1227		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
1228
 1229	/*
 1230	 * If LWML (src_maxburst) > HWML (dst_maxburst), we need to swap
 1231	 * LWML and HWML in INFO (A.3.2.5.1), and also swap
 1232	 * r0 (event_mask[1]) and r1 (event_mask[0]).
 1233	 */
1234	if (lwml > hwml) {
1235		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
1236						SDMA_WATERMARK_LEVEL_HWML);
1237		sdmac->watermark_level |= hwml;
1238		sdmac->watermark_level |= lwml << 16;
1239		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
1240	}
1241
1242	if (sdmac->per_address2 >= sdma->spba_start_addr &&
1243			sdmac->per_address2 <= sdma->spba_end_addr)
1244		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
1245
1246	if (sdmac->per_address >= sdma->spba_start_addr &&
1247			sdmac->per_address <= sdma->spba_end_addr)
1248		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
1249
1250	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
1251}
1252
1253static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
1254{
1255	unsigned int n_fifos;
1256	unsigned int stride_fifos;
1257	unsigned int words_per_fifo;
1258
1259	if (sdmac->sw_done)
1260		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
1261
1262	if (sdmac->direction == DMA_DEV_TO_MEM) {
1263		n_fifos = sdmac->n_fifos_src;
1264		stride_fifos = sdmac->stride_fifos_src;
1265	} else {
1266		n_fifos = sdmac->n_fifos_dst;
1267		stride_fifos = sdmac->stride_fifos_dst;
1268	}
1269
1270	words_per_fifo = sdmac->words_per_fifo;
1271
1272	sdmac->watermark_level |=
1273			FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
1274	sdmac->watermark_level |=
1275			FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
1276	if (words_per_fifo)
1277		sdmac->watermark_level |=
1278			FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
1279}
1280
1281static int sdma_config_channel(struct dma_chan *chan)
1282{
1283	struct sdma_channel *sdmac = to_sdma_chan(chan);
1284	int ret;
1285
1286	sdma_disable_channel(chan);
1287
1288	sdmac->event_mask[0] = 0;
1289	sdmac->event_mask[1] = 0;
1290	sdmac->shp_addr = 0;
1291	sdmac->per_addr = 0;
1292
1293	switch (sdmac->peripheral_type) {
1294	case IMX_DMATYPE_DSP:
1295		sdma_config_ownership(sdmac, false, true, true);
1296		break;
1297	case IMX_DMATYPE_MEMORY:
1298		sdma_config_ownership(sdmac, false, true, false);
1299		break;
1300	default:
1301		sdma_config_ownership(sdmac, true, true, false);
1302		break;
1303	}
1304
1305	ret = sdma_get_pc(sdmac, sdmac->peripheral_type);
1306	if (ret)
1307		return ret;
1308
1309	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1310			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1311		/* Handle multiple event channels differently */
1312		if (sdmac->event_id1) {
1313			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1314			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1315				sdma_set_watermarklevel_for_p2p(sdmac);
1316		} else {
1317			if (sdmac->peripheral_type ==
1318					IMX_DMATYPE_MULTI_SAI)
1319				sdma_set_watermarklevel_for_sais(sdmac);
1320
1321			__set_bit(sdmac->event_id0, sdmac->event_mask);
1322		}
1323
1324		/* Address */
1325		sdmac->shp_addr = sdmac->per_address;
1326		sdmac->per_addr = sdmac->per_address2;
1327	} else {
1328		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
1329	}
1330
1331	return 0;
1332}
1333
1334static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1335				     unsigned int priority)
1336{
1337	struct sdma_engine *sdma = sdmac->sdma;
1338	int channel = sdmac->channel;
1339
1340	if (priority < MXC_SDMA_MIN_PRIORITY
1341	    || priority > MXC_SDMA_MAX_PRIORITY) {
1342		return -EINVAL;
1343	}
1344
1345	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1346
1347	return 0;
1348}
1349
1350static int sdma_request_channel0(struct sdma_engine *sdma)
1351{
1352	int ret = -EBUSY;
1353
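	/*
	 * Channel 0 is the command channel: the single buffer descriptor
	 * allocated here (bd0) is reused by sdma_load_script() and
	 * sdma_load_context() to issue C0_* commands.
	 */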
1354	sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
1355				       GFP_NOWAIT);
1356	if (!sdma->bd0) {
1357		ret = -ENOMEM;
1358		goto out;
1359	}
1360
1361	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
1362	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
1363
1364	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
1365	return 0;
1366out:
1367
1368	return ret;
1369}
1370
1371
1372static int sdma_alloc_bd(struct sdma_desc *desc)
1373{
1374	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1375	int ret = 0;
1376
1377	desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
1378				      &desc->bd_phys, GFP_NOWAIT);
1379	if (!desc->bd) {
1380		ret = -ENOMEM;
1381		goto out;
1382	}
1383out:
1384	return ret;
1385}
1386
1387static void sdma_free_bd(struct sdma_desc *desc)
1388{
1389	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1390
1391	dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
1392			  desc->bd_phys);
1393}
1394
1395static void sdma_desc_free(struct virt_dma_desc *vd)
1396{
1397	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
1398
1399	sdma_free_bd(desc);
1400	kfree(desc);
1401}
1402
1403static int sdma_alloc_chan_resources(struct dma_chan *chan)
1404{
1405	struct sdma_channel *sdmac = to_sdma_chan(chan);
1406	struct imx_dma_data *data = chan->private;
1407	struct imx_dma_data mem_data;
1408	int prio, ret;
1409
 1410	/*
 1411	 * MEMCPY users (such as dmatest) may never set up chan->private via a
 1412	 * filter function, so create 'struct imx_dma_data mem_data' for this case.
 1413	 * Please note that in any other slave case you have to set up chan->private
 1414	 * with 'struct imx_dma_data' in your own filter function if you want to
 1415	 * request a dma channel by dma_request_channel() rather than
 1416	 * dma_request_slave_channel(). Otherwise, 'MEMCPY in case?' will appear
 1417	 * to warn you to correct your filter function.
 1418	 */
1419	if (!data) {
1420		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
1421		mem_data.priority = 2;
1422		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
1423		mem_data.dma_request = 0;
1424		mem_data.dma_request2 = 0;
1425		data = &mem_data;
1426
1427		ret = sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
1428		if (ret)
1429			return ret;
1430	}
1431
1432	switch (data->priority) {
1433	case DMA_PRIO_HIGH:
1434		prio = 3;
1435		break;
1436	case DMA_PRIO_MEDIUM:
1437		prio = 2;
1438		break;
1439	case DMA_PRIO_LOW:
1440	default:
1441		prio = 1;
1442		break;
1443	}
1444
1445	sdmac->peripheral_type = data->peripheral_type;
1446	sdmac->event_id0 = data->dma_request;
1447	sdmac->event_id1 = data->dma_request2;
1448
1449	ret = clk_enable(sdmac->sdma->clk_ipg);
1450	if (ret)
1451		return ret;
1452	ret = clk_enable(sdmac->sdma->clk_ahb);
1453	if (ret)
1454		goto disable_clk_ipg;
1455
1456	ret = sdma_set_channel_priority(sdmac, prio);
1457	if (ret)
1458		goto disable_clk_ahb;
1459
1460	return 0;
1461
1462disable_clk_ahb:
1463	clk_disable(sdmac->sdma->clk_ahb);
1464disable_clk_ipg:
1465	clk_disable(sdmac->sdma->clk_ipg);
1466	return ret;
1467}
1468
1469static void sdma_free_chan_resources(struct dma_chan *chan)
1470{
1471	struct sdma_channel *sdmac = to_sdma_chan(chan);
1472	struct sdma_engine *sdma = sdmac->sdma;
1473
1474	sdma_terminate_all(chan);
1475
1476	sdma_channel_synchronize(chan);
1477
1478	sdma_event_disable(sdmac, sdmac->event_id0);
1479	if (sdmac->event_id1)
1480		sdma_event_disable(sdmac, sdmac->event_id1);
1481
1482	sdmac->event_id0 = 0;
1483	sdmac->event_id1 = 0;
1484
1485	sdma_set_channel_priority(sdmac, 0);
1486
1487	clk_disable(sdma->clk_ipg);
1488	clk_disable(sdma->clk_ahb);
1489}
1490
1491static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
1492				enum dma_transfer_direction direction, u32 bds)
1493{
1494	struct sdma_desc *desc;
1495
1496	if (!sdmac->sdma->fw_loaded && sdmac->is_ram_script) {
1497		dev_warn_once(sdmac->sdma->dev, "sdma firmware not ready!\n");
1498		goto err_out;
1499	}
1500
1501	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
1502	if (!desc)
1503		goto err_out;
1504
1505	sdmac->status = DMA_IN_PROGRESS;
1506	sdmac->direction = direction;
1507	sdmac->flags = 0;
1508
1509	desc->chn_count = 0;
1510	desc->chn_real_count = 0;
1511	desc->buf_tail = 0;
1512	desc->buf_ptail = 0;
1513	desc->sdmac = sdmac;
1514	desc->num_bd = bds;
1515
1516	if (sdma_alloc_bd(desc))
1517		goto err_desc_out;
1518
1519	/* No slave_config called in MEMCPY case, so do here */
1520	if (direction == DMA_MEM_TO_MEM)
1521		sdma_config_ownership(sdmac, false, true, false);
1522
1523	if (sdma_load_context(sdmac))
1524		goto err_bd_out;
1525
1526	return desc;
1527
1528err_bd_out:
1529	sdma_free_bd(desc);
1530err_desc_out:
1531	kfree(desc);
1532err_out:
1533	return NULL;
1534}
1535
1536static struct dma_async_tx_descriptor *sdma_prep_memcpy(
1537		struct dma_chan *chan, dma_addr_t dma_dst,
1538		dma_addr_t dma_src, size_t len, unsigned long flags)
1539{
1540	struct sdma_channel *sdmac = to_sdma_chan(chan);
1541	struct sdma_engine *sdma = sdmac->sdma;
1542	int channel = sdmac->channel;
1543	size_t count;
1544	int i = 0, param;
1545	struct sdma_buffer_descriptor *bd;
1546	struct sdma_desc *desc;
1547
1548	if (!chan || !len)
1549		return NULL;
1550
1551	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
1552		&dma_src, &dma_dst, len, channel);
1553
1554	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
1555					len / SDMA_BD_MAX_CNT + 1);
1556	if (!desc)
1557		return NULL;
1558
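	/*
	 * Split the copy into buffer descriptors of at most SDMA_BD_MAX_CNT
	 * bytes each, chained with BD_CONT; only the last one gets BD_LAST
	 * and BD_INTR.
	 */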
1559	do {
1560		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
1561		bd = &desc->bd[i];
1562		bd->buffer_addr = dma_src;
1563		bd->ext_buffer_addr = dma_dst;
1564		bd->mode.count = count;
1565		desc->chn_count += count;
1566		bd->mode.command = 0;
1567
1568		dma_src += count;
1569		dma_dst += count;
1570		len -= count;
1571		i++;
1572
1573		param = BD_DONE | BD_EXTD | BD_CONT;
1574		/* last bd */
1575		if (!len) {
1576			param |= BD_INTR;
1577			param |= BD_LAST;
1578			param &= ~BD_CONT;
1579		}
1580
1581		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
1582				i, count, bd->buffer_addr,
1583				param & BD_WRAP ? "wrap" : "",
1584				param & BD_INTR ? " intr" : "");
1585
1586		bd->mode.status = param;
1587	} while (len);
1588
1589	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1590}
1591
1592static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1593		struct dma_chan *chan, struct scatterlist *sgl,
1594		unsigned int sg_len, enum dma_transfer_direction direction,
1595		unsigned long flags, void *context)
1596{
1597	struct sdma_channel *sdmac = to_sdma_chan(chan);
1598	struct sdma_engine *sdma = sdmac->sdma;
1599	int i, count;
1600	int channel = sdmac->channel;
1601	struct scatterlist *sg;
1602	struct sdma_desc *desc;
1603
1604	sdma_config_write(chan, &sdmac->slave_config, direction);
1605
1606	desc = sdma_transfer_init(sdmac, direction, sg_len);
1607	if (!desc)
1608		goto err_out;
1609
1610	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1611			sg_len, channel);
1612
1613	for_each_sg(sgl, sg, sg_len, i) {
1614		struct sdma_buffer_descriptor *bd = &desc->bd[i];
1615		int param;
1616
1617		bd->buffer_addr = sg->dma_address;
1618
1619		count = sg_dma_len(sg);
1620
1621		if (count > SDMA_BD_MAX_CNT) {
1622			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1623					channel, count, SDMA_BD_MAX_CNT);
1624			goto err_bd_out;
1625		}
1626
1627		bd->mode.count = count;
1628		desc->chn_count += count;
1629
1630		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1631			goto err_bd_out;
1632
1633		switch (sdmac->word_size) {
1634		case DMA_SLAVE_BUSWIDTH_4_BYTES:
1635			bd->mode.command = 0;
1636			if (count & 3 || sg->dma_address & 3)
1637				goto err_bd_out;
1638			break;
1639		case DMA_SLAVE_BUSWIDTH_2_BYTES:
1640			bd->mode.command = 2;
1641			if (count & 1 || sg->dma_address & 1)
1642				goto err_bd_out;
1643			break;
1644		case DMA_SLAVE_BUSWIDTH_1_BYTE:
1645			bd->mode.command = 1;
1646			break;
1647		default:
1648			goto err_bd_out;
1649		}
1650
1651		param = BD_DONE | BD_EXTD | BD_CONT;
1652
1653		if (i + 1 == sg_len) {
1654			param |= BD_INTR;
1655			param |= BD_LAST;
1656			param &= ~BD_CONT;
1657		}
1658
1659		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1660				i, count, (u64)sg->dma_address,
1661				param & BD_WRAP ? "wrap" : "",
1662				param & BD_INTR ? " intr" : "");
1663
1664		bd->mode.status = param;
1665	}
1666
1667	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1668err_bd_out:
1669	sdma_free_bd(desc);
1670	kfree(desc);
1671err_out:
1672	sdmac->status = DMA_ERROR;
1673	return NULL;
1674}
1675
1676static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1677		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1678		size_t period_len, enum dma_transfer_direction direction,
1679		unsigned long flags)
1680{
1681	struct sdma_channel *sdmac = to_sdma_chan(chan);
1682	struct sdma_engine *sdma = sdmac->sdma;
1683	int num_periods = buf_len / period_len;
1684	int channel = sdmac->channel;
1685	int i = 0, buf = 0;
1686	struct sdma_desc *desc;
1687
1688	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1689
1690	sdma_config_write(chan, &sdmac->slave_config, direction);
1691
1692	desc = sdma_transfer_init(sdmac, direction, num_periods);
1693	if (!desc)
1694		goto err_out;
1695
1696	desc->period_len = period_len;
1697
1698	sdmac->flags |= IMX_DMA_SG_LOOP;
1699
1700	if (period_len > SDMA_BD_MAX_CNT) {
1701		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1702				channel, period_len, SDMA_BD_MAX_CNT);
1703		goto err_bd_out;
1704	}
1705
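	/*
	 * Build one buffer descriptor per period; every BD raises an
	 * interrupt (BD_INTR) and the last one wraps back to the first
	 * (BD_WRAP) to form the ring.
	 */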
1706	while (buf < buf_len) {
1707		struct sdma_buffer_descriptor *bd = &desc->bd[i];
1708		int param;
1709
1710		bd->buffer_addr = dma_addr;
1711
1712		bd->mode.count = period_len;
1713
1714		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1715			goto err_bd_out;
1716		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1717			bd->mode.command = 0;
1718		else
1719			bd->mode.command = sdmac->word_size;
1720
1721		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1722		if (i + 1 == num_periods)
1723			param |= BD_WRAP;
1724
1725		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
1726				i, period_len, (u64)dma_addr,
1727				param & BD_WRAP ? "wrap" : "",
1728				param & BD_INTR ? " intr" : "");
1729
1730		bd->mode.status = param;
1731
1732		dma_addr += period_len;
1733		buf += period_len;
1734
1735		i++;
1736	}
1737
1738	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1739err_bd_out:
1740	sdma_free_bd(desc);
1741	kfree(desc);
1742err_out:
1743	sdmac->status = DMA_ERROR;
1744	return NULL;
1745}
1746
1747static int sdma_config_write(struct dma_chan *chan,
1748		       struct dma_slave_config *dmaengine_cfg,
1749		       enum dma_transfer_direction direction)
1750{
1751	struct sdma_channel *sdmac = to_sdma_chan(chan);
1752
1753	if (direction == DMA_DEV_TO_MEM) {
1754		sdmac->per_address = dmaengine_cfg->src_addr;
1755		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1756			dmaengine_cfg->src_addr_width;
1757		sdmac->word_size = dmaengine_cfg->src_addr_width;
1758	} else if (direction == DMA_DEV_TO_DEV) {
1759		sdmac->per_address2 = dmaengine_cfg->src_addr;
1760		sdmac->per_address = dmaengine_cfg->dst_addr;
1761		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1762			SDMA_WATERMARK_LEVEL_LWML;
1763		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1764			SDMA_WATERMARK_LEVEL_HWML;
1765		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1766	} else {
1767		sdmac->per_address = dmaengine_cfg->dst_addr;
1768		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1769			dmaengine_cfg->dst_addr_width;
1770		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1771	}
1772	sdmac->direction = direction;
1773	return sdma_config_channel(chan);
1774}
1775
1776static int sdma_config(struct dma_chan *chan,
1777		       struct dma_slave_config *dmaengine_cfg)
1778{
1779	struct sdma_channel *sdmac = to_sdma_chan(chan);
1780	struct sdma_engine *sdma = sdmac->sdma;
1781
1782	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
1783
1784	if (dmaengine_cfg->peripheral_config) {
1785		struct sdma_peripheral_config *sdmacfg = dmaengine_cfg->peripheral_config;
1786		if (dmaengine_cfg->peripheral_size != sizeof(struct sdma_peripheral_config)) {
1787			dev_err(sdma->dev, "Invalid peripheral size %zu, expected %zu\n",
1788				dmaengine_cfg->peripheral_size,
1789				sizeof(struct sdma_peripheral_config));
1790			return -EINVAL;
1791		}
1792		sdmac->n_fifos_src = sdmacfg->n_fifos_src;
1793		sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
1794		sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
1795		sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
1796		sdmac->words_per_fifo = sdmacfg->words_per_fifo;
1797		sdmac->sw_done = sdmacfg->sw_done;
1798	}
1799
 1800	/* Set ENBLn earlier so that a dma request arriving after this point is seen */
1801	if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
1802		return -EINVAL;
1803	sdma_event_enable(sdmac, sdmac->event_id0);
1804
1805	if (sdmac->event_id1) {
1806		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1807			return -EINVAL;
1808		sdma_event_enable(sdmac, sdmac->event_id1);
1809	}
1810
1811	return 0;
1812}
1813
1814static enum dma_status sdma_tx_status(struct dma_chan *chan,
1815				      dma_cookie_t cookie,
1816				      struct dma_tx_state *txstate)
1817{
1818	struct sdma_channel *sdmac = to_sdma_chan(chan);
1819	struct sdma_desc *desc = NULL;
1820	u32 residue;
1821	struct virt_dma_desc *vd;
1822	enum dma_status ret;
1823	unsigned long flags;
1824
1825	ret = dma_cookie_status(chan, cookie, txstate);
1826	if (ret == DMA_COMPLETE || !txstate)
1827		return ret;
1828
1829	spin_lock_irqsave(&sdmac->vc.lock, flags);
1830
1831	vd = vchan_find_desc(&sdmac->vc, cookie);
1832	if (vd)
1833		desc = to_sdma_desc(&vd->tx);
1834	else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
1835		desc = sdmac->desc;
1836
1837	if (desc) {
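		/*
		 * For cyclic transfers the residue is counted from the most
		 * recently serviced buffer descriptor to the end of the ring,
		 * minus the bytes already accounted for in that descriptor;
		 * otherwise it is the requested length minus the bytes
		 * actually transferred.
		 */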
1838		if (sdmac->flags & IMX_DMA_SG_LOOP)
1839			residue = (desc->num_bd - desc->buf_ptail) *
1840				desc->period_len - desc->chn_real_count;
1841		else
1842			residue = desc->chn_count - desc->chn_real_count;
1843	} else {
1844		residue = 0;
1845	}
1846
1847	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1848
1849	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1850			 residue);
1851
1852	return sdmac->status;
1853}
1854
1855static void sdma_issue_pending(struct dma_chan *chan)
1856{
1857	struct sdma_channel *sdmac = to_sdma_chan(chan);
1858	unsigned long flags;
1859
1860	spin_lock_irqsave(&sdmac->vc.lock, flags);
1861	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1862		sdma_start_desc(sdmac);
1863	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1864}
1865
1866#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
1867#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
1868#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	45
1869#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	46
1870
1871static void sdma_add_scripts(struct sdma_engine *sdma,
1872			     const struct sdma_script_start_addrs *addr)
1873{
1874	s32 *addr_arr = (u32 *)addr;
1875	s32 *saddr_arr = (u32 *)sdma->script_addrs;
1876	int i;
1877
1878	/* use the default firmware in ROM if missing external firmware */
1879	if (!sdma->script_number)
1880		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1881
1882	if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
1883				  / sizeof(s32)) {
1884		dev_err(sdma->dev,
 1885			"SDMA script number %d does not match the firmware.\n",
1886			sdma->script_number);
1887		return;
1888	}
1889
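	/*
	 * Only positive addresses from the firmware override the defaults;
	 * entries left at -EINVAL (see sdma_probe()) keep marking scripts
	 * that are not available.
	 */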
1890	for (i = 0; i < sdma->script_number; i++)
1891		if (addr_arr[i] > 0)
1892			saddr_arr[i] = addr_arr[i];
1893
1894	/*
1895	 * For compatibility with NXP internal legacy kernel before 4.19 which
1896	 * is based on uart ram script and mainline kernel based on uart rom
1897	 * script, both uart ram/rom scripts are present in newer sdma
1898	 * firmware. Use the rom versions if they are present (V3 or newer).
1899	 */
1900	if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
1901		if (addr->uart_2_mcu_rom_addr)
1902			sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
1903		if (addr->uartsh_2_mcu_rom_addr)
1904			sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
1905	}
1906}
1907
1908static void sdma_load_firmware(const struct firmware *fw, void *context)
1909{
1910	struct sdma_engine *sdma = context;
1911	const struct sdma_firmware_header *header;
1912	const struct sdma_script_start_addrs *addr;
1913	unsigned short *ram_code;
1914
1915	if (!fw) {
1916		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1917		/* In this case we just use the ROM firmware. */
1918		return;
1919	}
1920
1921	if (fw->size < sizeof(*header))
1922		goto err_firmware;
1923
1924	header = (struct sdma_firmware_header *)fw->data;
1925
1926	if (header->magic != SDMA_FIRMWARE_MAGIC)
1927		goto err_firmware;
1928	if (header->ram_code_start + header->ram_code_size > fw->size)
1929		goto err_firmware;
1930	switch (header->version_major) {
1931	case 1:
1932		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1933		break;
1934	case 2:
1935		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
1936		break;
1937	case 3:
1938		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
1939		break;
1940	case 4:
1941		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
1942		break;
1943	default:
1944		dev_err(sdma->dev, "unknown firmware version\n");
1945		goto err_firmware;
1946	}
1947
1948	addr = (void *)header + header->script_addrs_start;
1949	ram_code = (void *)header + header->ram_code_start;
1950
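	/*
	 * ram_code points at the 16-bit SDMA instructions embedded in the
	 * firmware image; ram_code_size is in bytes (it was bounds-checked
	 * against fw->size above) and is halved inside sdma_load_script().
	 */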
1951	clk_enable(sdma->clk_ipg);
1952	clk_enable(sdma->clk_ahb);
1953	/* download the RAM image for SDMA */
1954	sdma_load_script(sdma, ram_code,
1955			 header->ram_code_size,
1956			 addr->ram_code_start_addr);
1957	clk_disable(sdma->clk_ipg);
1958	clk_disable(sdma->clk_ahb);
1959
1960	sdma_add_scripts(sdma, addr);
1961
1962	sdma->fw_loaded = true;
1963
1964	dev_info(sdma->dev, "loaded firmware %d.%d\n",
1965		 header->version_major,
1966		 header->version_minor);
1967
1968err_firmware:
1969	release_firmware(fw);
1970}
1971
1972#define EVENT_REMAP_CELLS 3
1973
1974static int sdma_event_remap(struct sdma_engine *sdma)
1975{
1976	struct device_node *np = sdma->dev->of_node;
1977	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1978	struct property *event_remap;
1979	struct regmap *gpr;
1980	char propname[] = "fsl,sdma-event-remap";
1981	u32 reg, val, shift, num_map, i;
1982	int ret = 0;
1983
1984	if (IS_ERR(np) || !gpr_np)
1985		goto out;
1986
1987	event_remap = of_find_property(np, propname, NULL);
1988	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
1989	if (!num_map) {
1990		dev_dbg(sdma->dev, "no event needs to be remapped\n");
1991		goto out;
1992	} else if (num_map % EVENT_REMAP_CELLS) {
 1993		dev_err(sdma->dev, "the length of property %s must be a multiple of %d\n",
1994				propname, EVENT_REMAP_CELLS);
1995		ret = -EINVAL;
1996		goto out;
1997	}
1998
1999	gpr = syscon_node_to_regmap(gpr_np);
2000	if (IS_ERR(gpr)) {
2001		dev_err(sdma->dev, "failed to get gpr regmap\n");
2002		ret = PTR_ERR(gpr);
2003		goto out;
2004	}
2005
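	/*
	 * Each remap entry in "fsl,sdma-event-remap" is a <GPR register
	 * offset, bit position, value> triplet, applied below with
	 * regmap_update_bits().
	 */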
2006	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
2007		ret = of_property_read_u32_index(np, propname, i, &reg);
2008		if (ret) {
2009			dev_err(sdma->dev, "failed to read property %s index %d\n",
2010					propname, i);
2011			goto out;
2012		}
2013
2014		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
2015		if (ret) {
2016			dev_err(sdma->dev, "failed to read property %s index %d\n",
2017					propname, i + 1);
2018			goto out;
2019		}
2020
2021		ret = of_property_read_u32_index(np, propname, i + 2, &val);
2022		if (ret) {
2023			dev_err(sdma->dev, "failed to read property %s index %d\n",
2024					propname, i + 2);
2025			goto out;
2026		}
2027
2028		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
2029	}
2030
2031out:
2032	if (gpr_np)
2033		of_node_put(gpr_np);
2034
2035	return ret;
2036}
2037
2038static int sdma_get_firmware(struct sdma_engine *sdma,
2039		const char *fw_name)
2040{
2041	int ret;
2042
2043	ret = request_firmware_nowait(THIS_MODULE,
2044			FW_ACTION_UEVENT, fw_name, sdma->dev,
2045			GFP_KERNEL, sdma, sdma_load_firmware);
2046
2047	return ret;
2048}
2049
2050static int sdma_init(struct sdma_engine *sdma)
2051{
2052	int i, ret;
2053	dma_addr_t ccb_phys;
2054
2055	ret = clk_enable(sdma->clk_ipg);
2056	if (ret)
2057		return ret;
2058	ret = clk_enable(sdma->clk_ahb);
2059	if (ret)
2060		goto disable_clk_ipg;
2061
2062	if (sdma->drvdata->check_ratio &&
2063	    (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
2064		sdma->clk_ratio = 1;
2065
2066	/* Be sure SDMA has not started yet */
2067	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
2068
2069	sdma->channel_control = dma_alloc_coherent(sdma->dev,
2070			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
2071			sizeof(struct sdma_context_data),
2072			&ccb_phys, GFP_KERNEL);
2073
2074	if (!sdma->channel_control) {
2075		ret = -ENOMEM;
2076		goto err_dma_alloc;
2077	}
2078
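	/*
	 * The context buffer used for programming channels via channel 0
	 * lives right after the 32 channel control blocks in the same
	 * coherent allocation; ccb_phys is written to SDMA_H_C0PTR further
	 * down.
	 */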
2079	sdma->context = (void *)sdma->channel_control +
2080		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
2081	sdma->context_phys = ccb_phys +
2082		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
2083
2084	/* disable all channels */
2085	for (i = 0; i < sdma->drvdata->num_events; i++)
2086		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
2087
2088	/* All channels have priority 0 */
2089	for (i = 0; i < MAX_DMA_CHANNELS; i++)
2090		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
2091
2092	ret = sdma_request_channel0(sdma);
2093	if (ret)
2094		goto err_dma_alloc;
2095
2096	sdma_config_ownership(&sdma->channel[0], false, true, false);
2097
2098	/* Set Command Channel (Channel Zero) */
2099	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
2100
2101	/* Set bits of CONFIG register but with static context switching */
2102	if (sdma->clk_ratio)
2103		writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
2104	else
2105		writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
2106
2107	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
2108
 2109	/* Initialize channel 0's priority */
2110	sdma_set_channel_priority(&sdma->channel[0], 7);
2111
2112	clk_disable(sdma->clk_ipg);
2113	clk_disable(sdma->clk_ahb);
2114
2115	return 0;
2116
2117err_dma_alloc:
2118	clk_disable(sdma->clk_ahb);
2119disable_clk_ipg:
2120	clk_disable(sdma->clk_ipg);
2121	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
2122	return ret;
2123}
2124
2125static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
2126{
2127	struct sdma_channel *sdmac = to_sdma_chan(chan);
2128	struct imx_dma_data *data = fn_param;
2129
2130	if (!imx_dma_is_general_purpose(chan))
2131		return false;
2132
2133	sdmac->data = *data;
2134	chan->private = &sdmac->data;
2135
2136	return true;
2137}
2138
2139static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
2140				   struct of_dma *ofdma)
2141{
2142	struct sdma_engine *sdma = ofdma->of_dma_data;
2143	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
2144	struct imx_dma_data data;
2145
2146	if (dma_spec->args_count != 3)
2147		return NULL;
2148
2149	data.dma_request = dma_spec->args[0];
2150	data.peripheral_type = dma_spec->args[1];
2151	data.priority = dma_spec->args[2];
 2152	/*
 2153	 * Initialize dma_request2 to zero; it is not encoded in the dts.
 2154	 * For P2P, dma_request2 is initialized via dma_request_channel():
 2155	 * chan->private will point to the imx_dma_data, and in
 2156	 * device_alloc_chan_resources() imx_dma_data.dma_request2 will
 2157	 * be copied to sdmac->event_id1.
 2158	 */
2159	data.dma_request2 = 0;
2160
2161	return __dma_request_channel(&mask, sdma_filter_fn, &data,
2162				     ofdma->of_node);
2163}
2164
2165static int sdma_probe(struct platform_device *pdev)
2166{
2167	struct device_node *np = pdev->dev.of_node;
2168	struct device_node *spba_bus;
2169	const char *fw_name;
2170	int ret;
2171	int irq;
2172	struct resource *iores;
2173	struct resource spba_res;
2174	int i;
2175	struct sdma_engine *sdma;
2176	s32 *saddr_arr;
2177
2178	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2179	if (ret)
2180		return ret;
2181
2182	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
2183	if (!sdma)
2184		return -ENOMEM;
2185
2186	spin_lock_init(&sdma->channel_0_lock);
2187
2188	sdma->dev = &pdev->dev;
2189	sdma->drvdata = of_device_get_match_data(sdma->dev);
2190
2191	irq = platform_get_irq(pdev, 0);
2192	if (irq < 0)
2193		return irq;
2194
2195	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2196	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
2197	if (IS_ERR(sdma->regs))
2198		return PTR_ERR(sdma->regs);
2199
2200	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2201	if (IS_ERR(sdma->clk_ipg))
2202		return PTR_ERR(sdma->clk_ipg);
2203
2204	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2205	if (IS_ERR(sdma->clk_ahb))
2206		return PTR_ERR(sdma->clk_ahb);
2207
2208	ret = clk_prepare(sdma->clk_ipg);
2209	if (ret)
2210		return ret;
2211
2212	ret = clk_prepare(sdma->clk_ahb);
2213	if (ret)
2214		goto err_clk;
2215
2216	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
2217				dev_name(&pdev->dev), sdma);
2218	if (ret)
2219		goto err_irq;
2220
2221	sdma->irq = irq;
2222
2223	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
2224	if (!sdma->script_addrs) {
2225		ret = -ENOMEM;
2226		goto err_irq;
2227	}
2228
2229	/* initially no scripts available */
2230	saddr_arr = (s32 *)sdma->script_addrs;
2231	for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
2232		saddr_arr[i] = -EINVAL;
2233
2234	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
2235	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
2236	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
2237
2238	INIT_LIST_HEAD(&sdma->dma_device.channels);
2239	/* Initialize channel parameters */
2240	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2241		struct sdma_channel *sdmac = &sdma->channel[i];
2242
2243		sdmac->sdma = sdma;
2244
2245		sdmac->channel = i;
2246		sdmac->vc.desc_free = sdma_desc_free;
2247		INIT_LIST_HEAD(&sdmac->terminated);
2248		INIT_WORK(&sdmac->terminate_worker,
2249				sdma_channel_terminate_work);
2250		/*
2251		 * Add the channel to the DMAC list. Do not add channel 0 though
2252		 * because we need it internally in the SDMA driver. This also means
2253		 * that channel 0 in dmaengine counting matches sdma channel 1.
2254		 */
2255		if (i)
2256			vchan_init(&sdmac->vc, &sdma->dma_device);
2257	}
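	/*
	 * Channel 0 is still set up above (back-pointer, number, terminate
	 * work) but is deliberately not exposed through dmaengine: the
	 * driver keeps it for itself, to load channel contexts and scripts.
	 */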
2258
2259	ret = sdma_init(sdma);
2260	if (ret)
2261		goto err_init;
2262
2263	ret = sdma_event_remap(sdma);
2264	if (ret)
2265		goto err_init;
2266
2267	if (sdma->drvdata->script_addrs)
2268		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
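	/*
	 * The per-SoC drvdata carries the addresses of the scripts that live
	 * in the on-chip SDMA ROM; scripts loaded into RAM from firmware
	 * (see sdma_get_firmware() further down) may later override or
	 * extend these entries.
	 */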
2269
2270	sdma->dma_device.dev = &pdev->dev;
2271
2272	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
2273	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
2274	sdma->dma_device.device_tx_status = sdma_tx_status;
2275	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
2276	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
2277	sdma->dma_device.device_config = sdma_config;
2278	sdma->dma_device.device_terminate_all = sdma_terminate_all;
2279	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
2280	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
2281	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
2282	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
2283	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2284	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
2285	sdma->dma_device.device_issue_pending = sdma_issue_pending;
2286	sdma->dma_device.copy_align = 2;
2287	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
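	/*
	 * copy_align = 2 advertises 4-byte (1 << 2) alignment for memcpy
	 * transfers, and SDMA_BD_MAX_CNT reflects the 16-bit count field of
	 * a buffer descriptor, i.e. the largest segment a single BD can
	 * describe; bigger requests are split across multiple BDs.
	 */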
2288
2289	platform_set_drvdata(pdev, sdma);
2290
2291	ret = dma_async_device_register(&sdma->dma_device);
2292	if (ret) {
2293		dev_err(&pdev->dev, "unable to register\n");
2294		goto err_init;
2295	}
2296
2297	if (np) {
2298		ret = of_dma_controller_register(np, sdma_xlate, sdma);
2299		if (ret) {
2300			dev_err(&pdev->dev, "failed to register controller\n");
2301			goto err_register;
2302		}
2303
2304		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
2305		ret = of_address_to_resource(spba_bus, 0, &spba_res);
2306		if (!ret) {
2307			sdma->spba_start_addr = spba_res.start;
2308			sdma->spba_end_addr = spba_res.end;
2309		}
2310		of_node_put(spba_bus);
2311	}
2312
2313	/*
2314	 * Because the device tree does not encode the ROM script addresses,
2315	 * the RAM script in firmware is mandatory for a device tree
2316	 * probe; without it the probe fails.
2317	 */
2318	ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2319				      &fw_name);
2320	if (ret) {
2321		dev_warn(&pdev->dev, "failed to get firmware name\n");
2322	} else {
2323		ret = sdma_get_firmware(sdma, fw_name);
2324		if (ret)
2325			dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2326	}
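	/*
	 * sdma_get_firmware() is expected to fetch the RAM script image
	 * asynchronously, so a missing firmware file only triggers the
	 * warning above and probing still completes; the controller then
	 * runs with whatever ROM scripts are available.
	 */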
2327
2328	return 0;
2329
2330err_register:
2331	dma_async_device_unregister(&sdma->dma_device);
2332err_init:
2333	kfree(sdma->script_addrs);
2334err_irq:
2335	clk_unprepare(sdma->clk_ahb);
2336err_clk:
2337	clk_unprepare(sdma->clk_ipg);
2338	return ret;
2339}
2340
2341static int sdma_remove(struct platform_device *pdev)
2342{
2343	struct sdma_engine *sdma = platform_get_drvdata(pdev);
2344	int i;
2345
2346	devm_free_irq(&pdev->dev, sdma->irq, sdma);
2347	dma_async_device_unregister(&sdma->dma_device);
2348	kfree(sdma->script_addrs);
2349	clk_unprepare(sdma->clk_ahb);
2350	clk_unprepare(sdma->clk_ipg);
2351	/* Kill the tasklet */
2352	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2353		struct sdma_channel *sdmac = &sdma->channel[i];
2354
2355		tasklet_kill(&sdmac->vc.task);
2356		sdma_free_chan_resources(&sdmac->vc.chan);
2357	}
2358
2359	platform_set_drvdata(pdev, NULL);
2360	return 0;
2361}
2362
2363static struct platform_driver sdma_driver = {
2364	.driver		= {
2365		.name	= "imx-sdma",
2366		.of_match_table = sdma_dt_ids,
2367	},
2368	.remove		= sdma_remove,
2369	.probe		= sdma_probe,
2370};
2371
2372module_platform_driver(sdma_driver);
2373
2374MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
2375MODULE_DESCRIPTION("i.MX SDMA driver");
2376#if IS_ENABLED(CONFIG_SOC_IMX6Q)
2377MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
2378#endif
2379#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
2380MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
2381#endif
2382MODULE_LICENSE("GPL");