   1// SPDX-License-Identifier: GPL-2.0+
   2//
   3// drivers/dma/imx-sdma.c
   4//
   5// This file contains a driver for the Freescale Smart DMA engine
   6//
   7// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
   8//
   9// Based on code from Freescale:
  10//
  11// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
  12
  13#include <linux/init.h>
  14#include <linux/iopoll.h>
  15#include <linux/module.h>
  16#include <linux/types.h>
  17#include <linux/bitfield.h>
  18#include <linux/bitops.h>
  19#include <linux/mm.h>
  20#include <linux/interrupt.h>
  21#include <linux/clk.h>
  22#include <linux/delay.h>
  23#include <linux/sched.h>
  24#include <linux/semaphore.h>
  25#include <linux/spinlock.h>
  26#include <linux/device.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/firmware.h>
  29#include <linux/slab.h>
  30#include <linux/platform_device.h>
  31#include <linux/dmaengine.h>
  32#include <linux/of.h>
  33#include <linux/of_address.h>
  34#include <linux/of_dma.h>
  35#include <linux/workqueue.h>
  36
  37#include <asm/irq.h>
  38#include <linux/dma/imx-dma.h>
  39#include <linux/regmap.h>
  40#include <linux/mfd/syscon.h>
  41#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  42
  43#include "dmaengine.h"
  44#include "virt-dma.h"
  45
  46/* SDMA registers */
  47#define SDMA_H_C0PTR		0x000
  48#define SDMA_H_INTR		0x004
  49#define SDMA_H_STATSTOP		0x008
  50#define SDMA_H_START		0x00c
  51#define SDMA_H_EVTOVR		0x010
  52#define SDMA_H_DSPOVR		0x014
  53#define SDMA_H_HOSTOVR		0x018
  54#define SDMA_H_EVTPEND		0x01c
  55#define SDMA_H_DSPENBL		0x020
  56#define SDMA_H_RESET		0x024
  57#define SDMA_H_EVTERR		0x028
  58#define SDMA_H_INTRMSK		0x02c
  59#define SDMA_H_PSW		0x030
  60#define SDMA_H_EVTERRDBG	0x034
  61#define SDMA_H_CONFIG		0x038
  62#define SDMA_ONCE_ENB		0x040
  63#define SDMA_ONCE_DATA		0x044
  64#define SDMA_ONCE_INSTR		0x048
  65#define SDMA_ONCE_STAT		0x04c
  66#define SDMA_ONCE_CMD		0x050
  67#define SDMA_EVT_MIRROR		0x054
  68#define SDMA_ILLINSTADDR	0x058
  69#define SDMA_CHN0ADDR		0x05c
  70#define SDMA_ONCE_RTB		0x060
  71#define SDMA_XTRIG_CONF1	0x070
  72#define SDMA_XTRIG_CONF2	0x074
  73#define SDMA_CHNENBL0_IMX35	0x200
  74#define SDMA_CHNENBL0_IMX31	0x080
  75#define SDMA_CHNPRI_0		0x100
  76#define SDMA_DONE0_CONFIG	0x1000
  77
  78/*
  79 * Buffer descriptor status values.
  80 */
  81#define BD_DONE  0x01
  82#define BD_WRAP  0x02
  83#define BD_CONT  0x04
  84#define BD_INTR  0x08
  85#define BD_RROR  0x10
  86#define BD_LAST  0x20
  87#define BD_EXTD  0x80
  88
  89/*
  90 * Data Node descriptor status values.
  91 */
  92#define DND_END_OF_FRAME  0x80
  93#define DND_END_OF_XFER   0x40
  94#define DND_DONE          0x20
  95#define DND_UNUSED        0x01
  96
  97/*
  98 * IPCV2 descriptor status values.
  99 */
 100#define BD_IPCV2_END_OF_FRAME  0x40
 101
 102#define IPCV2_MAX_NODES        50
 103/*
 104 * Error bit set in the CCB status field by the SDMA,
 105 * in setbd routine, in case of a transfer error
 106 */
 107#define DATA_ERROR  0x10000000
 108
 109/*
 110 * Buffer descriptor commands.
 111 */
 112#define C0_ADDR             0x01
 113#define C0_LOAD             0x02
 114#define C0_DUMP             0x03
 115#define C0_SETCTX           0x07
 116#define C0_GETCTX           0x03
 117#define C0_SETDM            0x01
 118#define C0_SETPM            0x04
 119#define C0_GETDM            0x02
 120#define C0_GETPM            0x08
 121/*
 122 * Change endianness indicator in the BD command field
 123 */
 124#define CHANGE_ENDIANNESS   0x80
 125
 126/*
 127 *  p_2_p watermark_level description
 128 *	Bits		Name			Description
 129 *	0-7		Lower WML		Lower watermark level
 130 *	8		PS			1: Pad Swallowing
 131 *						0: No Pad Swallowing
 132 *	9		PA			1: Pad Adding
 133 *						0: No Pad Adding
 134 *	10		SPDIF			If this bit is set both source
 135 *						and destination are on SPBA
 136 *	11		Source Bit(SP)		1: Source on SPBA
 137 *						0: Source on AIPS
 138 *	12		Destination Bit(DP)	1: Destination on SPBA
 139 *						0: Destination on AIPS
 140 *	13-15		---------		MUST BE 0
 141 *	16-23		Higher WML		HWML
 142 *	24-27		N			Total number of samples after
 143 *						which Pad adding/Swallowing
 144 *						must be done. It must be odd.
 145 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 146 *						LWML event mask
 147 *						0: LWE in EVENTS register
 148 *						1: LWE in EVENTS2 register
 149 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 150 *						HWML event mask
 151 *						0: HWE in EVENTS register
 152 *						1: HWE in EVENTS2 register
 153 *	30		---------		MUST BE 0
 154 *	31		CONT			1: Amount of samples to be
 155 *						transferred is unknown and
 156 *						script will keep on
 157 *						transferring samples as long as
 158 *						both events are detected and
 159 *						script must be manually stopped
 160 *						by the application
 161 *						0: The amount of samples to be
 162 *						transferred is equal to the
 163 *						count field of mode word
 164 */
 165#define SDMA_WATERMARK_LEVEL_LWML	0xFF
 166#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
 167#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
 168#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
 169#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
 170#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
 171#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
 172#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
 173#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
 174#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
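/*
 * Illustrative sketch (editor's example, not part of the driver): one way a
 * peripheral-to-peripheral watermark_level word could be composed from the
 * bit layout documented above. The field values are made-up placeholders.
 */
static inline unsigned long example_p2p_watermark_level(void)
{
	unsigned long wml = 0;

	wml |= 4;				/* lower WML, bits 0-7 */
	wml |= 8 << 16;				/* higher WML, bits 16-23 */
	wml |= SDMA_WATERMARK_LEVEL_SP;		/* source sits on SPBA */
	wml |= SDMA_WATERMARK_LEVEL_CONT;	/* run until stopped manually */

	return wml;
}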
 175
 176#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 177				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 178				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 179
 180#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
 181				 BIT(DMA_MEM_TO_DEV) | \
 182				 BIT(DMA_DEV_TO_DEV))
 183
 184#define SDMA_WATERMARK_LEVEL_N_FIFOS	GENMASK(15, 12)
 185#define SDMA_WATERMARK_LEVEL_OFF_FIFOS  GENMASK(19, 16)
 186#define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO   GENMASK(31, 28)
 187#define SDMA_WATERMARK_LEVEL_SW_DONE	BIT(23)
 188
 189#define SDMA_DONE0_CONFIG_DONE_SEL	BIT(7)
 190#define SDMA_DONE0_CONFIG_DONE_DIS	BIT(6)
 191
 192/*
 193 * struct sdma_script_start_addrs - SDMA script start pointers
 194 *
 195 * start addresses of the different functions in the physical
 196 * address space of the SDMA engine.
 197 */
 198struct sdma_script_start_addrs {
 199	s32 ap_2_ap_addr;
 200	s32 ap_2_bp_addr;
 201	s32 ap_2_ap_fixed_addr;
 202	s32 bp_2_ap_addr;
 203	s32 loopback_on_dsp_side_addr;
 204	s32 mcu_interrupt_only_addr;
 205	s32 firi_2_per_addr;
 206	s32 firi_2_mcu_addr;
 207	s32 per_2_firi_addr;
 208	s32 mcu_2_firi_addr;
 209	s32 uart_2_per_addr;
 210	s32 uart_2_mcu_addr;
 211	s32 per_2_app_addr;
 212	s32 mcu_2_app_addr;
 213	s32 per_2_per_addr;
 214	s32 uartsh_2_per_addr;
 215	s32 uartsh_2_mcu_addr;
 216	s32 per_2_shp_addr;
 217	s32 mcu_2_shp_addr;
 218	s32 ata_2_mcu_addr;
 219	s32 mcu_2_ata_addr;
 220	s32 app_2_per_addr;
 221	s32 app_2_mcu_addr;
 222	s32 shp_2_per_addr;
 223	s32 shp_2_mcu_addr;
 224	s32 mshc_2_mcu_addr;
 225	s32 mcu_2_mshc_addr;
 226	s32 spdif_2_mcu_addr;
 227	s32 mcu_2_spdif_addr;
 228	s32 asrc_2_mcu_addr;
 229	s32 ext_mem_2_ipu_addr;
 230	s32 descrambler_addr;
 231	s32 dptc_dvfs_addr;
 232	s32 utra_addr;
 233	s32 ram_code_start_addr;
 234	/* End of v1 array */
 235	s32 mcu_2_ssish_addr;
 236	s32 ssish_2_mcu_addr;
 237	s32 hdmi_dma_addr;
 238	/* End of v2 array */
 239	s32 zcanfd_2_mcu_addr;
 240	s32 zqspi_2_mcu_addr;
 241	s32 mcu_2_ecspi_addr;
 242	s32 mcu_2_sai_addr;
 243	s32 sai_2_mcu_addr;
 244	s32 uart_2_mcu_rom_addr;
 245	s32 uartsh_2_mcu_rom_addr;
 246	/* End of v3 array */
 247	s32 mcu_2_zqspi_addr;
 248	/* End of v4 array */
 249};
 250
 251/*
 252 * Mode/Count of data node descriptors - IPCv2
 253 */
 254struct sdma_mode_count {
 255#define SDMA_BD_MAX_CNT	0xffff
 256	u32 count   : 16; /* size of the buffer pointed by this BD */
 257	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
 258	u32 command :  8; /* command mostly used for channel 0 */
 259};
 260
 261/*
 262 * Buffer descriptor
 263 */
 264struct sdma_buffer_descriptor {
 265	struct sdma_mode_count  mode;
 266	u32 buffer_addr;	/* address of the buffer described */
 267	u32 ext_buffer_addr;	/* extended buffer address */
 268} __attribute__ ((packed));
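/*
 * Editor's sketch (not part of the driver): the packed descriptor above is
 * meant to be exactly three 32-bit words (mode word, buffer address,
 * extended buffer address), i.e. 12 bytes. A build-time check such as the
 * one below, assuming static_assert() from <linux/build_bug.h> is available,
 * would make that expectation explicit.
 */
static_assert(sizeof(struct sdma_buffer_descriptor) == 12);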
 269
 270/**
 271 * struct sdma_channel_control - Channel control Block
 272 *
 273 * @current_bd_ptr:	current buffer descriptor processed
 274 * @base_bd_ptr:	first element of buffer descriptor array
 275 * @unused:		padding. The SDMA engine expects an array of 128 byte
 276 *			control blocks
 277 */
 278struct sdma_channel_control {
 279	u32 current_bd_ptr;
 280	u32 base_bd_ptr;
 281	u32 unused[2];
 282} __attribute__ ((packed));
 283
 284/**
 285 * struct sdma_state_registers - SDMA context for a channel
 286 *
 287 * @pc:		program counter
 288 * @unused1:	unused
 289 * @t:		test bit: status of arithmetic & test instruction
 290 * @rpc:	return program counter
 291 * @unused0:	unused
 292 * @sf:		source fault while loading data
 293 * @spc:	loop start program counter
 294 * @unused2:	unused
 295 * @df:		destination fault while storing data
 296 * @epc:	loop end program counter
 297 * @lm:		loop mode
 298 */
 299struct sdma_state_registers {
 300	u32 pc     :14;
 301	u32 unused1: 1;
 302	u32 t      : 1;
 303	u32 rpc    :14;
 304	u32 unused0: 1;
 305	u32 sf     : 1;
 306	u32 spc    :14;
 307	u32 unused2: 1;
 308	u32 df     : 1;
 309	u32 epc    :14;
 310	u32 lm     : 2;
 311} __attribute__ ((packed));
 312
 313/**
 314 * struct sdma_context_data - sdma context specific to a channel
 315 *
 316 * @channel_state:	channel state bits
 317 * @gReg:		general registers
 318 * @mda:		burst dma destination address register
 319 * @msa:		burst dma source address register
 320 * @ms:			burst dma status register
 321 * @md:			burst dma data register
 322 * @pda:		peripheral dma destination address register
 323 * @psa:		peripheral dma source address register
 324 * @ps:			peripheral dma status register
 325 * @pd:			peripheral dma data register
 326 * @ca:			CRC polynomial register
 327 * @cs:			CRC accumulator register
 328 * @dda:		dedicated core destination address register
 329 * @dsa:		dedicated core source address register
 330 * @ds:			dedicated core status register
 331 * @dd:			dedicated core data register
 332 * @scratch0:		1st word of dedicated ram for context switch
 333 * @scratch1:		2nd word of dedicated ram for context switch
 334 * @scratch2:		3rd word of dedicated ram for context switch
 335 * @scratch3:		4th word of dedicated ram for context switch
 336 * @scratch4:		5th word of dedicated ram for context switch
 337 * @scratch5:		6th word of dedicated ram for context switch
 338 * @scratch6:		7th word of dedicated ram for context switch
 339 * @scratch7:		8th word of dedicated ram for context switch
 340 */
 341struct sdma_context_data {
 342	struct sdma_state_registers  channel_state;
 343	u32  gReg[8];
 344	u32  mda;
 345	u32  msa;
 346	u32  ms;
 347	u32  md;
 348	u32  pda;
 349	u32  psa;
 350	u32  ps;
 351	u32  pd;
 352	u32  ca;
 353	u32  cs;
 354	u32  dda;
 355	u32  dsa;
 356	u32  ds;
 357	u32  dd;
 358	u32  scratch0;
 359	u32  scratch1;
 360	u32  scratch2;
 361	u32  scratch3;
 362	u32  scratch4;
 363	u32  scratch5;
 364	u32  scratch6;
 365	u32  scratch7;
 366} __attribute__ ((packed));
 367
 368
 369struct sdma_engine;
 370
 371/**
 372 * struct sdma_desc - descriptor structure for one transfer
 373 * @vd:			descriptor for virt dma
 374 * @num_bd:		number of descriptors currently being handled
 375 * @bd_phys:		physical address of bd
 376 * @buf_tail:		ID of the buffer that was processed
 377 * @buf_ptail:		ID of the previous buffer that was processed
 378 * @period_len:		period length, used in cyclic.
 379 * @chn_real_count:	the real count updated from bd->mode.count
 380 * @chn_count:		the transfer count set
 381 * @sdmac:		sdma_channel pointer
 382 * @bd:			pointer to the allocated bd
 383 */
 384struct sdma_desc {
 385	struct virt_dma_desc	vd;
 386	unsigned int		num_bd;
 387	dma_addr_t		bd_phys;
 388	unsigned int		buf_tail;
 389	unsigned int		buf_ptail;
 390	unsigned int		period_len;
 391	unsigned int		chn_real_count;
 392	unsigned int		chn_count;
 393	struct sdma_channel	*sdmac;
 394	struct sdma_buffer_descriptor *bd;
 395};
 396
 397/**
 398 * struct sdma_channel - housekeeping for a SDMA channel
 399 *
 400 * @vc:			virt_dma base structure
 401 * @desc:		sdma descriptor including vd and other special members
 402 * @sdma:		pointer to the SDMA engine for this channel
 403 * @channel:		the channel number, matches dmaengine chan_id + 1
 404 * @direction:		transfer type. Needed for setting SDMA script
 405 * @slave_config:	Slave configuration
 406 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
 407 * @event_id0:		aka dma request line
 408 * @event_id1:		for channels that use 2 events
 409 * @word_size:		peripheral access size
 410 * @pc_from_device:	script address for those device_2_memory
 411 * @pc_to_device:	script address for those memory_2_device
 412 * @device_to_device:	script address for those device_2_device
 413 * @pc_to_pc:		script address for those memory_2_memory
 414 * @flags:		loop mode or not
 415 * @per_address:	peripheral source or destination address in common case
 416 *                      destination address in p_2_p case
 417 * @per_address2:	peripheral source address in p_2_p case
 418 * @event_mask:		event mask used in p_2_p script
 419 * @watermark_level:	value for gReg[7], some script will extend it from
 420 *			basic watermark such as p_2_p
 421 * @shp_addr:		value for gReg[6]
 422 * @per_addr:		value for gReg[2]
 423 * @status:		status of dma channel
 424 * @data:		specific sdma interface structure
 425 * @terminate_worker:	used to call back into terminate work function
 426 * @terminated:		terminated list
 427 * @is_ram_script:	flag for script in ram
 428 * @n_fifos_src:	number of source device fifos
 429 * @n_fifos_dst:	number of destination device fifos
 430 * @sw_done:		software done flag
 431 * @stride_fifos_src:	stride for source device FIFOs
 432 * @stride_fifos_dst:	stride for destination device FIFOs
 433 * @words_per_fifo:	number of words to copy at a time for one FIFO
 434 */
 435struct sdma_channel {
 436	struct virt_dma_chan		vc;
 437	struct sdma_desc		*desc;
 438	struct sdma_engine		*sdma;
 439	unsigned int			channel;
 440	enum dma_transfer_direction		direction;
 441	struct dma_slave_config		slave_config;
 442	enum sdma_peripheral_type	peripheral_type;
 443	unsigned int			event_id0;
 444	unsigned int			event_id1;
 445	enum dma_slave_buswidth		word_size;
 446	unsigned int			pc_from_device, pc_to_device;
 447	unsigned int			device_to_device;
 448	unsigned int                    pc_to_pc;
 449	unsigned long			flags;
 450	dma_addr_t			per_address, per_address2;
 451	unsigned long			event_mask[2];
 452	unsigned long			watermark_level;
 453	u32				shp_addr, per_addr;
 454	enum dma_status			status;
 455	struct imx_dma_data		data;
 456	struct work_struct		terminate_worker;
 457	struct list_head                terminated;
 458	bool				is_ram_script;
 459	unsigned int			n_fifos_src;
 460	unsigned int			n_fifos_dst;
 461	unsigned int			stride_fifos_src;
 462	unsigned int			stride_fifos_dst;
 463	unsigned int			words_per_fifo;
 464	bool				sw_done;
 465};
 466
 467#define IMX_DMA_SG_LOOP		BIT(0)
 468
 469#define MAX_DMA_CHANNELS 32
 470#define MXC_SDMA_DEFAULT_PRIORITY 1
 471#define MXC_SDMA_MIN_PRIORITY 1
 472#define MXC_SDMA_MAX_PRIORITY 7
 473
 474#define SDMA_FIRMWARE_MAGIC 0x414d4453
 475
 476/**
 477 * struct sdma_firmware_header - Layout of the firmware image
 478 *
 479 * @magic:		"SDMA"
 480 * @version_major:	increased whenever layout of struct
 481 *			sdma_script_start_addrs changes.
 482 * @version_minor:	firmware minor version (for binary compatible changes)
 483 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 484 * @num_script_addrs:	Number of script addresses in this image
 485 * @ram_code_start:	offset of SDMA ram image in this firmware image
 486 * @ram_code_size:	size of SDMA ram image
 487 */
 488struct sdma_firmware_header {
 489	u32	magic;
 490	u32	version_major;
 491	u32	version_minor;
 492	u32	script_addrs_start;
 493	u32	num_script_addrs;
 494	u32	ram_code_start;
 495	u32	ram_code_size;
 496};
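/*
 * Illustrative sketch (editor's example, not part of the driver): minimal
 * sanity checks a consumer of this header layout might perform before
 * trusting a firmware image. The function name and the fw_size parameter
 * are assumptions made for the example.
 */
static inline bool example_sdma_fw_header_valid(const struct sdma_firmware_header *header,
						size_t fw_size)
{
	if (fw_size < sizeof(*header))
		return false;
	/* 0x414d4453 is "SDMA" when read as little-endian ASCII */
	if (header->magic != SDMA_FIRMWARE_MAGIC)
		return false;
	if ((u64)header->ram_code_start + header->ram_code_size > fw_size)
		return false;
	return true;
}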
 497
 498struct sdma_driver_data {
 499	int chnenbl0;
 500	int num_events;
 501	struct sdma_script_start_addrs	*script_addrs;
 502	bool check_ratio;
 503	/*
 504	 * The ecspi ERR009165 fix has to be done in the sdma script,
 505	 * and it has been fixed in the SoC from i.MX6UL onwards.
 506	 * Please see the link below for more information:
 507	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
 508	 */
 509	bool ecspi_fixed;
 510};
 511
 512struct sdma_engine {
 513	struct device			*dev;
 514	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 515	struct sdma_channel_control	*channel_control;
 516	void __iomem			*regs;
 517	struct sdma_context_data	*context;
 518	dma_addr_t			context_phys;
 519	struct dma_device		dma_device;
 520	struct clk			*clk_ipg;
 521	struct clk			*clk_ahb;
 522	spinlock_t			channel_0_lock;
 523	u32				script_number;
 524	struct sdma_script_start_addrs	*script_addrs;
 525	const struct sdma_driver_data	*drvdata;
 526	u32				spba_start_addr;
 527	u32				spba_end_addr;
 528	unsigned int			irq;
 529	dma_addr_t			bd0_phys;
 530	struct sdma_buffer_descriptor	*bd0;
 531	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
 532	bool				clk_ratio;
 533	bool                            fw_loaded;
 534};
 535
 536static int sdma_config_write(struct dma_chan *chan,
 537		       struct dma_slave_config *dmaengine_cfg,
 538		       enum dma_transfer_direction direction);
 539
 540static struct sdma_driver_data sdma_imx31 = {
 541	.chnenbl0 = SDMA_CHNENBL0_IMX31,
 542	.num_events = 32,
 543};
 544
 545static struct sdma_script_start_addrs sdma_script_imx25 = {
 546	.ap_2_ap_addr = 729,
 547	.uart_2_mcu_addr = 904,
 548	.per_2_app_addr = 1255,
 549	.mcu_2_app_addr = 834,
 550	.uartsh_2_mcu_addr = 1120,
 551	.per_2_shp_addr = 1329,
 552	.mcu_2_shp_addr = 1048,
 553	.ata_2_mcu_addr = 1560,
 554	.mcu_2_ata_addr = 1479,
 555	.app_2_per_addr = 1189,
 556	.app_2_mcu_addr = 770,
 557	.shp_2_per_addr = 1407,
 558	.shp_2_mcu_addr = 979,
 559};
 560
 561static struct sdma_driver_data sdma_imx25 = {
 562	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 563	.num_events = 48,
 564	.script_addrs = &sdma_script_imx25,
 565};
 566
 567static struct sdma_driver_data sdma_imx35 = {
 568	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 569	.num_events = 48,
 570};
 571
 572static struct sdma_script_start_addrs sdma_script_imx51 = {
 573	.ap_2_ap_addr = 642,
 574	.uart_2_mcu_addr = 817,
 575	.mcu_2_app_addr = 747,
 576	.mcu_2_shp_addr = 961,
 577	.ata_2_mcu_addr = 1473,
 578	.mcu_2_ata_addr = 1392,
 579	.app_2_per_addr = 1033,
 580	.app_2_mcu_addr = 683,
 581	.shp_2_per_addr = 1251,
 582	.shp_2_mcu_addr = 892,
 583};
 584
 585static struct sdma_driver_data sdma_imx51 = {
 586	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 587	.num_events = 48,
 588	.script_addrs = &sdma_script_imx51,
 589};
 590
 591static struct sdma_script_start_addrs sdma_script_imx53 = {
 592	.ap_2_ap_addr = 642,
 593	.app_2_mcu_addr = 683,
 594	.mcu_2_app_addr = 747,
 595	.uart_2_mcu_addr = 817,
 596	.shp_2_mcu_addr = 891,
 597	.mcu_2_shp_addr = 960,
 598	.uartsh_2_mcu_addr = 1032,
 599	.spdif_2_mcu_addr = 1100,
 600	.mcu_2_spdif_addr = 1134,
 601	.firi_2_mcu_addr = 1193,
 602	.mcu_2_firi_addr = 1290,
 603};
 604
 605static struct sdma_driver_data sdma_imx53 = {
 606	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 607	.num_events = 48,
 608	.script_addrs = &sdma_script_imx53,
 609};
 610
 611static struct sdma_script_start_addrs sdma_script_imx6q = {
 612	.ap_2_ap_addr = 642,
 613	.uart_2_mcu_addr = 817,
 614	.mcu_2_app_addr = 747,
 615	.per_2_per_addr = 6331,
 616	.uartsh_2_mcu_addr = 1032,
 617	.mcu_2_shp_addr = 960,
 618	.app_2_mcu_addr = 683,
 619	.shp_2_mcu_addr = 891,
 620	.spdif_2_mcu_addr = 1100,
 621	.mcu_2_spdif_addr = 1134,
 622};
 623
 624static struct sdma_driver_data sdma_imx6q = {
 625	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 626	.num_events = 48,
 627	.script_addrs = &sdma_script_imx6q,
 628};
 629
 630static struct sdma_driver_data sdma_imx6ul = {
 631	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 632	.num_events = 48,
 633	.script_addrs = &sdma_script_imx6q,
 634	.ecspi_fixed = true,
 635};
 636
 637static struct sdma_script_start_addrs sdma_script_imx7d = {
 638	.ap_2_ap_addr = 644,
 639	.uart_2_mcu_addr = 819,
 640	.mcu_2_app_addr = 749,
 641	.uartsh_2_mcu_addr = 1034,
 642	.mcu_2_shp_addr = 962,
 643	.app_2_mcu_addr = 685,
 644	.shp_2_mcu_addr = 893,
 645	.spdif_2_mcu_addr = 1102,
 646	.mcu_2_spdif_addr = 1136,
 647};
 648
 649static struct sdma_driver_data sdma_imx7d = {
 650	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 651	.num_events = 48,
 652	.script_addrs = &sdma_script_imx7d,
 653};
 654
 655static struct sdma_driver_data sdma_imx8mq = {
 656	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 657	.num_events = 48,
 658	.script_addrs = &sdma_script_imx7d,
 659	.check_ratio = 1,
 660};
 661
 662static const struct of_device_id sdma_dt_ids[] = {
 663	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
 664	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
 665	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
 666	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
 667	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 668	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 669	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
 670	{ .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
 671	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
 672	{ /* sentinel */ }
 673};
 674MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 675
 676#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
 677#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
 678#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
 679#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected */
 680
 681static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 682{
 683	u32 chnenbl0 = sdma->drvdata->chnenbl0;
 684	return chnenbl0 + event * 4;
 685}
 686
 687static int sdma_config_ownership(struct sdma_channel *sdmac,
 688		bool event_override, bool mcu_override, bool dsp_override)
 689{
 690	struct sdma_engine *sdma = sdmac->sdma;
 691	int channel = sdmac->channel;
 692	unsigned long evt, mcu, dsp;
 693
 694	if (event_override && mcu_override && dsp_override)
 695		return -EINVAL;
 696
 697	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
 698	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
 699	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
 700
 701	if (dsp_override)
 702		__clear_bit(channel, &dsp);
 703	else
 704		__set_bit(channel, &dsp);
 705
 706	if (event_override)
 707		__clear_bit(channel, &evt);
 708	else
 709		__set_bit(channel, &evt);
 710
 711	if (mcu_override)
 712		__clear_bit(channel, &mcu);
 713	else
 714		__set_bit(channel, &mcu);
 715
 716	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
 717	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
 718	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
 719
 720	return 0;
 721}
 722
 723static int is_sdma_channel_enabled(struct sdma_engine *sdma, int channel)
 724{
 725	return !!(readl(sdma->regs + SDMA_H_STATSTOP) & BIT(channel));
 726}
 727
 728static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 729{
 730	writel(BIT(channel), sdma->regs + SDMA_H_START);
 731}
 732
 733/*
 734 * sdma_run_channel0 - run a channel and wait till it's done
 735 */
 736static int sdma_run_channel0(struct sdma_engine *sdma)
 737{
 738	int ret;
 739	u32 reg;
 740
 741	sdma_enable_channel(sdma, 0);
 742
 743	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
 744						reg, !(reg & 1), 1, 500);
 745	if (ret)
 746		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 747
 748	/* Set bits of CONFIG register with dynamic context switching */
 749	reg = readl(sdma->regs + SDMA_H_CONFIG);
 750	if ((reg & SDMA_H_CONFIG_CSM) == 0) {
 751		reg |= SDMA_H_CONFIG_CSM;
 752		writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
 753	}
 754
 755	return ret;
 756}
 757
 758static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 759		u32 address)
 760{
 761	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 762	void *buf_virt;
 763	dma_addr_t buf_phys;
 764	int ret;
 765	unsigned long flags;
 766
 767	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
 768	if (!buf_virt)
 769		return -ENOMEM;
 770
 771	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 772
 773	bd0->mode.command = C0_SETPM;
 774	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
 775	bd0->mode.count = size / 2;
 776	bd0->buffer_addr = buf_phys;
 777	bd0->ext_buffer_addr = address;
 778
 779	memcpy(buf_virt, buf, size);
 780
 781	ret = sdma_run_channel0(sdma);
 782
 783	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 784
 785	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
 786
 787	return ret;
 788}
 789
 790static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 791{
 792	struct sdma_engine *sdma = sdmac->sdma;
 793	int channel = sdmac->channel;
 794	unsigned long val;
 795	u32 chnenbl = chnenbl_ofs(sdma, event);
 796
 797	val = readl_relaxed(sdma->regs + chnenbl);
 798	__set_bit(channel, &val);
 799	writel_relaxed(val, sdma->regs + chnenbl);
 800
 801	/* Set SDMA_DONEx_CONFIG if sw_done is enabled */
 802	if (sdmac->sw_done) {
 803		val = readl_relaxed(sdma->regs + SDMA_DONE0_CONFIG);
 804		val |= SDMA_DONE0_CONFIG_DONE_SEL;
 805		val &= ~SDMA_DONE0_CONFIG_DONE_DIS;
 806		writel_relaxed(val, sdma->regs + SDMA_DONE0_CONFIG);
 807	}
 808}
 809
 810static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 811{
 812	struct sdma_engine *sdma = sdmac->sdma;
 813	int channel = sdmac->channel;
 814	u32 chnenbl = chnenbl_ofs(sdma, event);
 815	unsigned long val;
 816
 817	val = readl_relaxed(sdma->regs + chnenbl);
 818	__clear_bit(channel, &val);
 819	writel_relaxed(val, sdma->regs + chnenbl);
 820}
 821
 822static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
 823{
 824	return container_of(t, struct sdma_desc, vd.tx);
 825}
 826
 827static void sdma_start_desc(struct sdma_channel *sdmac)
 828{
 829	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
 830	struct sdma_desc *desc;
 831	struct sdma_engine *sdma = sdmac->sdma;
 832	int channel = sdmac->channel;
 833
 834	if (!vd) {
 835		sdmac->desc = NULL;
 836		return;
 837	}
 838	sdmac->desc = desc = to_sdma_desc(&vd->tx);
 839
 840	list_del(&vd->node);
 841
 842	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 843	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
 844	sdma_enable_channel(sdma, sdmac->channel);
 845}
 846
 847static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 848{
 849	struct sdma_buffer_descriptor *bd;
 850	int error = 0;
 851	enum dma_status	old_status = sdmac->status;
 852
 853	/*
 854	 * loop mode. Iterate over the descriptors, re-set them up and
 855	 * call the callback function.
 856	 */
 857	while (sdmac->desc) {
 858		struct sdma_desc *desc = sdmac->desc;
 859
 860		bd = &desc->bd[desc->buf_tail];
 861
 862		if (bd->mode.status & BD_DONE)
 863			break;
 864
 865		if (bd->mode.status & BD_RROR) {
 866			bd->mode.status &= ~BD_RROR;
 867			sdmac->status = DMA_ERROR;
 868			error = -EIO;
 869		}
 870
 871	       /*
 872		* We use bd->mode.count to calculate the residue, since it contains
 873		* the number of bytes present in the current buffer descriptor.
 874		*/
 875
 876		desc->chn_real_count = bd->mode.count;
 877		bd->mode.count = desc->period_len;
 878		desc->buf_ptail = desc->buf_tail;
 879		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
 880
 881		/*
 882		 * The callback is called from the interrupt context in order
 883		 * to reduce latency and to avoid the risk of altering the
 884		 * SDMA transaction status by the time the client tasklet is
 885		 * executed.
 886		 */
 887		spin_unlock(&sdmac->vc.lock);
 888		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
 889		spin_lock(&sdmac->vc.lock);
 890
 891		/* Assign buffer ownership to SDMA */
 892		bd->mode.status |= BD_DONE;
 893
 894		if (error)
 895			sdmac->status = old_status;
 896	}
 897
 898	/*
 899	 * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
 900	 * owned buffer is available (i.e. BD_DONE was set too late).
 901	 */
 902	if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
 903		dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
 904		sdma_enable_channel(sdmac->sdma, sdmac->channel);
 905	}
 906}
 907
 908static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 909{
 910	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 911	struct sdma_buffer_descriptor *bd;
 912	int i, error = 0;
 913
 914	sdmac->desc->chn_real_count = 0;
 915	/*
 916	 * non-loop mode. Iterate over all descriptors, collect
 917	 * errors and call the callback function.
 918	 */
 919	for (i = 0; i < sdmac->desc->num_bd; i++) {
 920		bd = &sdmac->desc->bd[i];
 921
 922		if (bd->mode.status & (BD_DONE | BD_RROR))
 923			error = -EIO;
 924		sdmac->desc->chn_real_count += bd->mode.count;
 925	}
 926
 927	if (error)
 928		sdmac->status = DMA_ERROR;
 929	else
 930		sdmac->status = DMA_COMPLETE;
 931}
 932
 933static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 934{
 935	struct sdma_engine *sdma = dev_id;
 936	unsigned long stat;
 937
 938	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
 939	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 940	/* channel 0 is special and not handled here, see run_channel0() */
 941	stat &= ~1;
 942
 943	while (stat) {
 944		int channel = fls(stat) - 1;
 945		struct sdma_channel *sdmac = &sdma->channel[channel];
 946		struct sdma_desc *desc;
 947
 948		spin_lock(&sdmac->vc.lock);
 949		desc = sdmac->desc;
 950		if (desc) {
 951			if (sdmac->flags & IMX_DMA_SG_LOOP) {
 952				if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
 953					sdma_update_channel_loop(sdmac);
 954				else
 955					vchan_cyclic_callback(&desc->vd);
 956			} else {
 957				mxc_sdma_handle_channel_normal(sdmac);
 958				vchan_cookie_complete(&desc->vd);
 959				sdma_start_desc(sdmac);
 960			}
 961		}
 962
 963		spin_unlock(&sdmac->vc.lock);
 964		__clear_bit(channel, &stat);
 965	}
 966
 967	return IRQ_HANDLED;
 968}
 969
 970/*
 971 * sets the pc of SDMA script according to the peripheral type
 972 */
 973static int sdma_get_pc(struct sdma_channel *sdmac,
 974		enum sdma_peripheral_type peripheral_type)
 975{
 976	struct sdma_engine *sdma = sdmac->sdma;
 977	int per_2_emi = 0, emi_2_per = 0;
 978	/*
 979	 * These are needed once we start to support transfers between
 980	 * two peripherals or memory-to-memory transfers
 981	 */
 982	int per_2_per = 0, emi_2_emi = 0;
 983
 984	sdmac->pc_from_device = 0;
 985	sdmac->pc_to_device = 0;
 986	sdmac->device_to_device = 0;
 987	sdmac->pc_to_pc = 0;
 988	sdmac->is_ram_script = false;
 989
 990	switch (peripheral_type) {
 991	case IMX_DMATYPE_MEMORY:
 992		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 993		break;
 994	case IMX_DMATYPE_DSP:
 995		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
 996		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
 997		break;
 998	case IMX_DMATYPE_FIRI:
 999		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
1000		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
1001		break;
1002	case IMX_DMATYPE_UART:
1003		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
1004		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1005		break;
1006	case IMX_DMATYPE_UART_SP:
1007		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
1008		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1009		break;
1010	case IMX_DMATYPE_ATA:
1011		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
1012		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
1013		break;
1014	case IMX_DMATYPE_CSPI:
1015		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
1016
1017		/* Use rom script mcu_2_app if ERR009165 fixed */
1018		if (sdmac->sdma->drvdata->ecspi_fixed) {
1019			emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1020		} else {
1021			emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
1022			sdmac->is_ram_script = true;
1023		}
1024
1025		break;
1026	case IMX_DMATYPE_EXT:
1027	case IMX_DMATYPE_SSI:
1028	case IMX_DMATYPE_SAI:
1029		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
1030		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1031		break;
1032	case IMX_DMATYPE_SSI_DUAL:
1033		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
1034		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
1035		sdmac->is_ram_script = true;
1036		break;
1037	case IMX_DMATYPE_SSI_SP:
1038	case IMX_DMATYPE_MMC:
1039	case IMX_DMATYPE_SDHC:
1040	case IMX_DMATYPE_CSPI_SP:
1041	case IMX_DMATYPE_ESAI:
1042	case IMX_DMATYPE_MSHC_SP:
1043		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
1044		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1045		break;
1046	case IMX_DMATYPE_ASRC:
1047		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
1048		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
1049		per_2_per = sdma->script_addrs->per_2_per_addr;
1050		sdmac->is_ram_script = true;
1051		break;
1052	case IMX_DMATYPE_ASRC_SP:
1053		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
1054		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1055		per_2_per = sdma->script_addrs->per_2_per_addr;
1056		break;
1057	case IMX_DMATYPE_MSHC:
1058		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
1059		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
1060		break;
1061	case IMX_DMATYPE_CCM:
1062		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
1063		break;
1064	case IMX_DMATYPE_SPDIF:
1065		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
1066		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
1067		break;
1068	case IMX_DMATYPE_IPU_MEMORY:
1069		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
1070		break;
1071	case IMX_DMATYPE_MULTI_SAI:
1072		per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
1073		emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
1074		break;
1075	case IMX_DMATYPE_HDMI:
1076		emi_2_per = sdma->script_addrs->hdmi_dma_addr;
1077		sdmac->is_ram_script = true;
1078		break;
1079	default:
1080		dev_err(sdma->dev, "Unsupported transfer type %d\n",
1081			peripheral_type);
1082		return -EINVAL;
1083	}
1084
1085	sdmac->pc_from_device = per_2_emi;
1086	sdmac->pc_to_device = emi_2_per;
1087	sdmac->device_to_device = per_2_per;
1088	sdmac->pc_to_pc = emi_2_emi;
1089
1090	return 0;
1091}
1092
1093static int sdma_load_context(struct sdma_channel *sdmac)
1094{
1095	struct sdma_engine *sdma = sdmac->sdma;
1096	int channel = sdmac->channel;
1097	int load_address;
1098	struct sdma_context_data *context = sdma->context;
1099	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
1100	int ret;
1101	unsigned long flags;
1102
1103	if (sdmac->direction == DMA_DEV_TO_MEM)
1104		load_address = sdmac->pc_from_device;
1105	else if (sdmac->direction == DMA_DEV_TO_DEV)
1106		load_address = sdmac->device_to_device;
1107	else if (sdmac->direction == DMA_MEM_TO_MEM)
1108		load_address = sdmac->pc_to_pc;
1109	else
1110		load_address = sdmac->pc_to_device;
1111
1112	if (load_address < 0)
1113		return load_address;
1114
1115	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
1116	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
1117	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
1118	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
1119	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
1120	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
1121
1122	spin_lock_irqsave(&sdma->channel_0_lock, flags);
1123
1124	memset(context, 0, sizeof(*context));
1125	context->channel_state.pc = load_address;
1126
1127	/* Send, via the context, the event mask, base address for the
1128	 * peripheral and the watermark level
1129	 */
1130	if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
1131		context->gReg[4] = sdmac->per_addr;
1132		context->gReg[6] = sdmac->shp_addr;
1133	} else {
1134		context->gReg[0] = sdmac->event_mask[1];
1135		context->gReg[1] = sdmac->event_mask[0];
1136		context->gReg[2] = sdmac->per_addr;
1137		context->gReg[6] = sdmac->shp_addr;
1138		context->gReg[7] = sdmac->watermark_level;
1139	}
1140
1141	bd0->mode.command = C0_SETDM;
1142	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
1143	bd0->mode.count = sizeof(*context) / 4;
1144	bd0->buffer_addr = sdma->context_phys;
1145	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
1146	ret = sdma_run_channel0(sdma);
1147
1148	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
1149
1150	return ret;
1151}
1152
1153static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
1154{
1155	return container_of(chan, struct sdma_channel, vc.chan);
1156}
1157
1158static int sdma_disable_channel(struct dma_chan *chan)
1159{
1160	struct sdma_channel *sdmac = to_sdma_chan(chan);
1161	struct sdma_engine *sdma = sdmac->sdma;
1162	int channel = sdmac->channel;
1163
1164	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
1165	sdmac->status = DMA_ERROR;
1166
1167	return 0;
1168}
1169static void sdma_channel_terminate_work(struct work_struct *work)
1170{
1171	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
1172						  terminate_worker);
1173	/*
1174	 * According to the NXP R&D team, a delay of one BD SDMA processing
1175	 * time (1ms at most) should be added after disabling the channel
1176	 * bit, to ensure the SDMA core has really stopped after SDMA
1177	 * clients call .device_terminate_all.
1178	 */
1179	usleep_range(1000, 2000);
1180
1181	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->terminated);
1182}
1183
1184static int sdma_terminate_all(struct dma_chan *chan)
1185{
1186	struct sdma_channel *sdmac = to_sdma_chan(chan);
1187	unsigned long flags;
1188
1189	spin_lock_irqsave(&sdmac->vc.lock, flags);
1190
1191	sdma_disable_channel(chan);
1192
1193	if (sdmac->desc) {
1194		vchan_terminate_vdesc(&sdmac->desc->vd);
1195		/*
1196		 * Move the current descriptor into the terminated list so
1197		 * that it can be freed later in sdma_channel_terminate_work
1198		 * alone, without involving a next descriptor raised before
1199		 * the last descriptor has terminated.
1200		 */
1201		vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
1202		sdmac->desc = NULL;
1203		schedule_work(&sdmac->terminate_worker);
1204	}
1205
1206	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1207
1208	return 0;
1209}
1210
1211static void sdma_channel_synchronize(struct dma_chan *chan)
1212{
1213	struct sdma_channel *sdmac = to_sdma_chan(chan);
1214
1215	vchan_synchronize(&sdmac->vc);
1216
1217	flush_work(&sdmac->terminate_worker);
1218}
1219
1220static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
1221{
1222	struct sdma_engine *sdma = sdmac->sdma;
1223
1224	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
1225	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
1226
1227	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
1228	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
1229
1230	if (sdmac->event_id0 > 31)
1231		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
1232
1233	if (sdmac->event_id1 > 31)
1234		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
1235
1236	/*
1237	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to
1238	 * swap LWML and HWML of INFO(A.3.2.5.1), and also swap
1239	 * r0(event_mask[1]) and r1(event_mask[0]).
1240	 */
1241	if (lwml > hwml) {
1242		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
1243						SDMA_WATERMARK_LEVEL_HWML);
1244		sdmac->watermark_level |= hwml;
1245		sdmac->watermark_level |= lwml << 16;
1246		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
1247	}
1248
1249	if (sdmac->per_address2 >= sdma->spba_start_addr &&
1250			sdmac->per_address2 <= sdma->spba_end_addr)
1251		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
1252
1253	if (sdmac->per_address >= sdma->spba_start_addr &&
1254			sdmac->per_address <= sdma->spba_end_addr)
1255		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
1256
1257	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
1258}
1259
1260static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
1261{
1262	unsigned int n_fifos;
1263	unsigned int stride_fifos;
1264	unsigned int words_per_fifo;
1265
1266	if (sdmac->sw_done)
1267		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
1268
1269	if (sdmac->direction == DMA_DEV_TO_MEM) {
1270		n_fifos = sdmac->n_fifos_src;
1271		stride_fifos = sdmac->stride_fifos_src;
1272	} else {
1273		n_fifos = sdmac->n_fifos_dst;
1274		stride_fifos = sdmac->stride_fifos_dst;
1275	}
1276
1277	words_per_fifo = sdmac->words_per_fifo;
1278
1279	sdmac->watermark_level |=
1280			FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
1281	sdmac->watermark_level |=
1282			FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
1283	if (words_per_fifo)
1284		sdmac->watermark_level |=
1285			FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
1286}
1287
1288static int sdma_config_channel(struct dma_chan *chan)
1289{
1290	struct sdma_channel *sdmac = to_sdma_chan(chan);
1291	int ret;
1292
1293	sdma_disable_channel(chan);
1294
1295	sdmac->event_mask[0] = 0;
1296	sdmac->event_mask[1] = 0;
1297	sdmac->shp_addr = 0;
1298	sdmac->per_addr = 0;
1299
1300	switch (sdmac->peripheral_type) {
1301	case IMX_DMATYPE_DSP:
1302		sdma_config_ownership(sdmac, false, true, true);
1303		break;
1304	case IMX_DMATYPE_MEMORY:
1305		sdma_config_ownership(sdmac, false, true, false);
1306		break;
1307	default:
1308		sdma_config_ownership(sdmac, true, true, false);
1309		break;
1310	}
1311
1312	ret = sdma_get_pc(sdmac, sdmac->peripheral_type);
1313	if (ret)
1314		return ret;
1315
1316	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1317			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1318		/* Handle multiple event channels differently */
1319		if (sdmac->event_id1) {
1320			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1321			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1322				sdma_set_watermarklevel_for_p2p(sdmac);
1323		} else {
1324			if (sdmac->peripheral_type ==
1325					IMX_DMATYPE_MULTI_SAI)
1326				sdma_set_watermarklevel_for_sais(sdmac);
1327
1328			__set_bit(sdmac->event_id0, sdmac->event_mask);
1329		}
1330
1331		/* Address */
1332		sdmac->shp_addr = sdmac->per_address;
1333		sdmac->per_addr = sdmac->per_address2;
1334	} else {
1335		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
1336	}
1337
1338	return 0;
1339}
1340
1341static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1342				     unsigned int priority)
1343{
1344	struct sdma_engine *sdma = sdmac->sdma;
1345	int channel = sdmac->channel;
1346
1347	if (priority < MXC_SDMA_MIN_PRIORITY
1348	    || priority > MXC_SDMA_MAX_PRIORITY) {
1349		return -EINVAL;
1350	}
1351
1352	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1353
1354	return 0;
1355}
1356
1357static int sdma_request_channel0(struct sdma_engine *sdma)
1358{
1359	int ret = -EBUSY;
1360
1361	sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
1362				       GFP_NOWAIT);
1363	if (!sdma->bd0) {
1364		ret = -ENOMEM;
1365		goto out;
1366	}
1367
1368	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
1369	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
1370
1371	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
1372	return 0;
1373out:
1374
1375	return ret;
1376}
1377
1378
1379static int sdma_alloc_bd(struct sdma_desc *desc)
1380{
1381	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1382	int ret = 0;
1383
1384	desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
1385				      &desc->bd_phys, GFP_NOWAIT);
1386	if (!desc->bd) {
1387		ret = -ENOMEM;
1388		goto out;
1389	}
1390out:
1391	return ret;
1392}
1393
1394static void sdma_free_bd(struct sdma_desc *desc)
1395{
1396	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1397
1398	dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
1399			  desc->bd_phys);
1400}
1401
1402static void sdma_desc_free(struct virt_dma_desc *vd)
1403{
1404	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
1405
1406	sdma_free_bd(desc);
1407	kfree(desc);
1408}
1409
1410static int sdma_alloc_chan_resources(struct dma_chan *chan)
1411{
1412	struct sdma_channel *sdmac = to_sdma_chan(chan);
1413	struct imx_dma_data *data = chan->private;
1414	struct imx_dma_data mem_data;
1415	int prio, ret;
1416
1417	/*
1418	 * MEMCPY clients such as dmatest may never set up chan->private via
1419	 * a filter function, so create 'struct imx_dma_data mem_data' for
1420	 * this case. Note that in any other slave case you have to set up
1421	 * chan->private with 'struct imx_dma_data' in your own filter
1422	 * function if you request the dma channel with dma_request_channel()
1423	 * rather than dma_request_slave_channel(). Otherwise, 'MEMCPY in
1424	 * case?' will appear to warn you to correct your filter function.
1425	 */
1426	if (!data) {
1427		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
1428		mem_data.priority = 2;
1429		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
1430		mem_data.dma_request = 0;
1431		mem_data.dma_request2 = 0;
1432		data = &mem_data;
1433
1434		ret = sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
1435		if (ret)
1436			return ret;
1437	}
1438
1439	switch (data->priority) {
1440	case DMA_PRIO_HIGH:
1441		prio = 3;
1442		break;
1443	case DMA_PRIO_MEDIUM:
1444		prio = 2;
1445		break;
1446	case DMA_PRIO_LOW:
1447	default:
1448		prio = 1;
1449		break;
1450	}
1451
1452	sdmac->peripheral_type = data->peripheral_type;
1453	sdmac->event_id0 = data->dma_request;
1454	sdmac->event_id1 = data->dma_request2;
1455
1456	ret = clk_enable(sdmac->sdma->clk_ipg);
1457	if (ret)
1458		return ret;
1459	ret = clk_enable(sdmac->sdma->clk_ahb);
1460	if (ret)
1461		goto disable_clk_ipg;
1462
1463	ret = sdma_set_channel_priority(sdmac, prio);
1464	if (ret)
1465		goto disable_clk_ahb;
1466
1467	return 0;
1468
1469disable_clk_ahb:
1470	clk_disable(sdmac->sdma->clk_ahb);
1471disable_clk_ipg:
1472	clk_disable(sdmac->sdma->clk_ipg);
1473	return ret;
1474}
1475
1476static void sdma_free_chan_resources(struct dma_chan *chan)
1477{
1478	struct sdma_channel *sdmac = to_sdma_chan(chan);
1479	struct sdma_engine *sdma = sdmac->sdma;
1480
1481	sdma_terminate_all(chan);
1482
1483	sdma_channel_synchronize(chan);
1484
1485	sdma_event_disable(sdmac, sdmac->event_id0);
1486	if (sdmac->event_id1)
1487		sdma_event_disable(sdmac, sdmac->event_id1);
1488
1489	sdmac->event_id0 = 0;
1490	sdmac->event_id1 = 0;
1491
1492	sdma_set_channel_priority(sdmac, 0);
1493
1494	clk_disable(sdma->clk_ipg);
1495	clk_disable(sdma->clk_ahb);
1496}
1497
1498static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
1499				enum dma_transfer_direction direction, u32 bds)
1500{
1501	struct sdma_desc *desc;
1502
1503	if (!sdmac->sdma->fw_loaded && sdmac->is_ram_script) {
1504		dev_warn_once(sdmac->sdma->dev, "sdma firmware not ready!\n");
1505		goto err_out;
1506	}
1507
1508	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
1509	if (!desc)
1510		goto err_out;
1511
1512	sdmac->status = DMA_IN_PROGRESS;
1513	sdmac->direction = direction;
1514	sdmac->flags = 0;
1515
1516	desc->chn_count = 0;
1517	desc->chn_real_count = 0;
1518	desc->buf_tail = 0;
1519	desc->buf_ptail = 0;
1520	desc->sdmac = sdmac;
1521	desc->num_bd = bds;
1522
1523	if (bds && sdma_alloc_bd(desc))
1524		goto err_desc_out;
1525
1526	/* No slave_config called in MEMCPY case, so do it here */
1527	if (direction == DMA_MEM_TO_MEM)
1528		sdma_config_ownership(sdmac, false, true, false);
1529
1530	if (sdma_load_context(sdmac))
1531		goto err_bd_out;
1532
1533	return desc;
1534
1535err_bd_out:
1536	sdma_free_bd(desc);
1537err_desc_out:
1538	kfree(desc);
1539err_out:
1540	return NULL;
1541}
1542
1543static struct dma_async_tx_descriptor *sdma_prep_memcpy(
1544		struct dma_chan *chan, dma_addr_t dma_dst,
1545		dma_addr_t dma_src, size_t len, unsigned long flags)
1546{
1547	struct sdma_channel *sdmac = to_sdma_chan(chan);
1548	struct sdma_engine *sdma = sdmac->sdma;
1549	int channel = sdmac->channel;
1550	size_t count;
1551	int i = 0, param;
1552	struct sdma_buffer_descriptor *bd;
1553	struct sdma_desc *desc;
1554
1555	if (!chan || !len)
1556		return NULL;
1557
1558	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
1559		&dma_src, &dma_dst, len, channel);
1560
1561	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
1562					len / SDMA_BD_MAX_CNT + 1);
1563	if (!desc)
1564		return NULL;
1565
1566	do {
1567		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
1568		bd = &desc->bd[i];
1569		bd->buffer_addr = dma_src;
1570		bd->ext_buffer_addr = dma_dst;
1571		bd->mode.count = count;
1572		desc->chn_count += count;
1573		bd->mode.command = 0;
1574
1575		dma_src += count;
1576		dma_dst += count;
1577		len -= count;
1578		i++;
1579
1580		param = BD_DONE | BD_EXTD | BD_CONT;
1581		/* last bd */
1582		if (!len) {
1583			param |= BD_INTR;
1584			param |= BD_LAST;
1585			param &= ~BD_CONT;
1586		}
1587
1588		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
1589				i, count, bd->buffer_addr,
1590				param & BD_WRAP ? "wrap" : "",
1591				param & BD_INTR ? " intr" : "");
1592
1593		bd->mode.status = param;
1594	} while (len);
1595
1596	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1597}
1598
1599static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1600		struct dma_chan *chan, struct scatterlist *sgl,
1601		unsigned int sg_len, enum dma_transfer_direction direction,
1602		unsigned long flags, void *context)
1603{
1604	struct sdma_channel *sdmac = to_sdma_chan(chan);
1605	struct sdma_engine *sdma = sdmac->sdma;
1606	int i, count;
1607	int channel = sdmac->channel;
1608	struct scatterlist *sg;
1609	struct sdma_desc *desc;
1610
1611	sdma_config_write(chan, &sdmac->slave_config, direction);
1612
1613	desc = sdma_transfer_init(sdmac, direction, sg_len);
1614	if (!desc)
1615		goto err_out;
1616
1617	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1618			sg_len, channel);
1619
1620	for_each_sg(sgl, sg, sg_len, i) {
1621		struct sdma_buffer_descriptor *bd = &desc->bd[i];
1622		int param;
1623
1624		bd->buffer_addr = sg->dma_address;
1625
1626		count = sg_dma_len(sg);
1627
1628		if (count > SDMA_BD_MAX_CNT) {
1629			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1630					channel, count, SDMA_BD_MAX_CNT);
1631			goto err_bd_out;
1632		}
1633
1634		bd->mode.count = count;
1635		desc->chn_count += count;
1636
1637		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1638			goto err_bd_out;
1639
1640		switch (sdmac->word_size) {
1641		case DMA_SLAVE_BUSWIDTH_4_BYTES:
1642			bd->mode.command = 0;
1643			if (count & 3 || sg->dma_address & 3)
1644				goto err_bd_out;
1645			break;
1646		case DMA_SLAVE_BUSWIDTH_2_BYTES:
1647			bd->mode.command = 2;
1648			if (count & 1 || sg->dma_address & 1)
1649				goto err_bd_out;
1650			break;
1651		case DMA_SLAVE_BUSWIDTH_1_BYTE:
1652			bd->mode.command = 1;
1653			break;
1654		default:
1655			goto err_bd_out;
1656		}
1657
1658		param = BD_DONE | BD_EXTD | BD_CONT;
1659
1660		if (i + 1 == sg_len) {
1661			param |= BD_INTR;
1662			param |= BD_LAST;
1663			param &= ~BD_CONT;
1664		}
1665
1666		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1667				i, count, (u64)sg->dma_address,
1668				param & BD_WRAP ? "wrap" : "",
1669				param & BD_INTR ? " intr" : "");
1670
1671		bd->mode.status = param;
1672	}
1673
1674	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1675err_bd_out:
1676	sdma_free_bd(desc);
1677	kfree(desc);
1678err_out:
1679	sdmac->status = DMA_ERROR;
1680	return NULL;
1681}
1682
1683static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1684		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1685		size_t period_len, enum dma_transfer_direction direction,
1686		unsigned long flags)
1687{
1688	struct sdma_channel *sdmac = to_sdma_chan(chan);
1689	struct sdma_engine *sdma = sdmac->sdma;
1690	int num_periods = 0;
1691	int channel = sdmac->channel;
1692	int i = 0, buf = 0;
1693	struct sdma_desc *desc;
1694
1695	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1696
1697	if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
1698		num_periods = buf_len / period_len;
1699
1700	sdma_config_write(chan, &sdmac->slave_config, direction);
1701
1702	desc = sdma_transfer_init(sdmac, direction, num_periods);
1703	if (!desc)
1704		goto err_out;
1705
1706	desc->period_len = period_len;
1707
1708	sdmac->flags |= IMX_DMA_SG_LOOP;
1709
1710	if (period_len > SDMA_BD_MAX_CNT) {
1711		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1712				channel, period_len, SDMA_BD_MAX_CNT);
1713		goto err_bd_out;
1714	}
1715
1716	if (sdmac->peripheral_type == IMX_DMATYPE_HDMI)
1717		return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1718
1719	while (buf < buf_len) {
1720		struct sdma_buffer_descriptor *bd = &desc->bd[i];
1721		int param;
1722
1723		bd->buffer_addr = dma_addr;
1724
1725		bd->mode.count = period_len;
1726
1727		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1728			goto err_bd_out;
1729		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1730			bd->mode.command = 0;
1731		else
1732			bd->mode.command = sdmac->word_size;
1733
1734		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1735		if (i + 1 == num_periods)
1736			param |= BD_WRAP;
1737
1738		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
1739				i, period_len, (u64)dma_addr,
1740				param & BD_WRAP ? "wrap" : "",
1741				param & BD_INTR ? " intr" : "");
1742
1743		bd->mode.status = param;
1744
1745		dma_addr += period_len;
1746		buf += period_len;
1747
1748		i++;
1749	}
1750
1751	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1752err_bd_out:
1753	sdma_free_bd(desc);
1754	kfree(desc);
1755err_out:
1756	sdmac->status = DMA_ERROR;
1757	return NULL;
1758}
1759
1760static int sdma_config_write(struct dma_chan *chan,
1761		       struct dma_slave_config *dmaengine_cfg,
1762		       enum dma_transfer_direction direction)
1763{
1764	struct sdma_channel *sdmac = to_sdma_chan(chan);
1765
1766	if (direction == DMA_DEV_TO_MEM) {
1767		sdmac->per_address = dmaengine_cfg->src_addr;
1768		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1769			dmaengine_cfg->src_addr_width;
1770		sdmac->word_size = dmaengine_cfg->src_addr_width;
1771	} else if (direction == DMA_DEV_TO_DEV) {
1772		sdmac->per_address2 = dmaengine_cfg->src_addr;
1773		sdmac->per_address = dmaengine_cfg->dst_addr;
1774		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1775			SDMA_WATERMARK_LEVEL_LWML;
1776		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1777			SDMA_WATERMARK_LEVEL_HWML;
1778		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1779	} else if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
1780		sdmac->per_address = dmaengine_cfg->dst_addr;
1781		sdmac->per_address2 = dmaengine_cfg->src_addr;
1782		sdmac->watermark_level = 0;
1783	} else {
1784		sdmac->per_address = dmaengine_cfg->dst_addr;
1785		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1786			dmaengine_cfg->dst_addr_width;
1787		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1788	}
1789	sdmac->direction = direction;
1790	return sdma_config_channel(chan);
1791}
1792
1793static int sdma_config(struct dma_chan *chan,
1794		       struct dma_slave_config *dmaengine_cfg)
1795{
1796	struct sdma_channel *sdmac = to_sdma_chan(chan);
1797	struct sdma_engine *sdma = sdmac->sdma;
1798
1799	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
1800
1801	if (dmaengine_cfg->peripheral_config) {
1802		struct sdma_peripheral_config *sdmacfg = dmaengine_cfg->peripheral_config;
1803		if (dmaengine_cfg->peripheral_size != sizeof(struct sdma_peripheral_config)) {
1804			dev_err(sdma->dev, "Invalid peripheral size %zu, expected %zu\n",
1805				dmaengine_cfg->peripheral_size,
1806				sizeof(struct sdma_peripheral_config));
1807			return -EINVAL;
1808		}
1809		sdmac->n_fifos_src = sdmacfg->n_fifos_src;
1810		sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
1811		sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
1812		sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
1813		sdmac->words_per_fifo = sdmacfg->words_per_fifo;
1814		sdmac->sw_done = sdmacfg->sw_done;
1815	}
1816
1817	/* Set ENBLn earlier to make sure the dma request is triggered after it */
1818	if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
1819		return -EINVAL;
1820	sdma_event_enable(sdmac, sdmac->event_id0);
1821
1822	if (sdmac->event_id1) {
1823		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1824			return -EINVAL;
1825		sdma_event_enable(sdmac, sdmac->event_id1);
1826	}
1827
1828	return 0;
1829}
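/*
 * For illustration only (not taken from this driver): a client driver would
 * typically pass the i.MX-specific options through
 * dma_slave_config::peripheral_config, roughly like this (addresses and
 * values are hypothetical):
 *
 *	struct sdma_peripheral_config sdmacfg = { .n_fifos_src = 2 };
 *	struct dma_slave_config cfg = {
 *		.src_addr	 = fifo_phys_addr,
 *		.src_addr_width	 = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	 = 8,
 *		.peripheral_config = &sdmacfg,
 *		.peripheral_size   = sizeof(sdmacfg),
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */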
1830
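/*
 * Report the status and residue of a transaction. For cyclic (loop)
 * transfers the residue is derived from the remaining periods and the
 * count of the current buffer descriptor; otherwise it is the requested
 * count minus the bytes already transferred.
 */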
1831static enum dma_status sdma_tx_status(struct dma_chan *chan,
1832				      dma_cookie_t cookie,
1833				      struct dma_tx_state *txstate)
1834{
1835	struct sdma_channel *sdmac = to_sdma_chan(chan);
1836	struct sdma_desc *desc = NULL;
1837	u32 residue;
1838	struct virt_dma_desc *vd;
1839	enum dma_status ret;
1840	unsigned long flags;
1841
1842	ret = dma_cookie_status(chan, cookie, txstate);
1843	if (ret == DMA_COMPLETE || !txstate)
1844		return ret;
1845
1846	spin_lock_irqsave(&sdmac->vc.lock, flags);
1847
1848	vd = vchan_find_desc(&sdmac->vc, cookie);
1849	if (vd)
1850		desc = to_sdma_desc(&vd->tx);
1851	else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
1852		desc = sdmac->desc;
1853
1854	if (desc) {
1855		if (sdmac->flags & IMX_DMA_SG_LOOP)
1856			residue = (desc->num_bd - desc->buf_ptail) *
1857				desc->period_len - desc->chn_real_count;
1858		else
1859			residue = desc->chn_count - desc->chn_real_count;
1860	} else {
1861		residue = 0;
1862	}
1863
1864	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1865
1866	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1867			 residue);
1868
1869	return sdmac->status;
1870}
1871
1872static void sdma_issue_pending(struct dma_chan *chan)
1873{
1874	struct sdma_channel *sdmac = to_sdma_chan(chan);
1875	unsigned long flags;
1876
1877	spin_lock_irqsave(&sdmac->vc.lock, flags);
1878	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1879		sdma_start_desc(sdmac);
1880	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1881}
1882
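/*
 * Number of s32 script addresses exported by each firmware header major
 * version; used to bound the copy done in sdma_add_scripts().
 */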
1883#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
1884#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
1885#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	45
1886#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	46
1887
1888static void sdma_add_scripts(struct sdma_engine *sdma,
1889			     const struct sdma_script_start_addrs *addr)
1890{
1891	s32 *addr_arr = (s32 *)addr;
1892	s32 *saddr_arr = (s32 *)sdma->script_addrs;
1893	int i;
1894
1895	/* use the default firmware in ROM if missing external firmware */
1896	if (!sdma->script_number)
1897		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1898
1899	if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
1900				  / sizeof(s32)) {
1901		dev_err(sdma->dev,
1902			"SDMA script number %d does not match firmware\n",
1903			sdma->script_number);
1904		return;
1905	}
1906
1907	for (i = 0; i < sdma->script_number; i++)
1908		if (addr_arr[i] > 0)
1909			saddr_arr[i] = addr_arr[i];
1910
1911	/*
1912	 * Newer SDMA firmware contains both the UART RAM scripts (used by the
1913	 * NXP internal legacy kernel before 4.19) and the UART ROM scripts
1914	 * (used by the mainline kernel). Use the ROM versions if they are
1915	 * present (V3 or newer).
1916	 */
1917	if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
1918		if (addr->uart_2_mcu_rom_addr)
1919			sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
1920		if (addr->uartsh_2_mcu_rom_addr)
1921			sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
1922	}
1923}
1924
1925static void sdma_load_firmware(const struct firmware *fw, void *context)
1926{
1927	struct sdma_engine *sdma = context;
1928	const struct sdma_firmware_header *header;
1929	const struct sdma_script_start_addrs *addr;
1930	unsigned short *ram_code;
1931
1932	if (!fw) {
1933		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1934		/* In this case we just use the ROM firmware. */
1935		return;
1936	}
1937
1938	if (fw->size < sizeof(*header))
1939		goto err_firmware;
1940
1941	header = (struct sdma_firmware_header *)fw->data;
1942
1943	if (header->magic != SDMA_FIRMWARE_MAGIC)
1944		goto err_firmware;
1945	if (header->ram_code_start + header->ram_code_size > fw->size)
1946		goto err_firmware;
1947	switch (header->version_major) {
1948	case 1:
1949		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1950		break;
1951	case 2:
1952		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
1953		break;
1954	case 3:
1955		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
1956		break;
1957	case 4:
1958		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
1959		break;
1960	default:
1961		dev_err(sdma->dev, "unknown firmware version\n");
1962		goto err_firmware;
1963	}
1964
1965	addr = (void *)header + header->script_addrs_start;
1966	ram_code = (void *)header + header->ram_code_start;
1967
1968	clk_enable(sdma->clk_ipg);
1969	clk_enable(sdma->clk_ahb);
1970	/* download the RAM image for SDMA */
1971	sdma_load_script(sdma, ram_code,
1972			 header->ram_code_size,
1973			 addr->ram_code_start_addr);
1974	clk_disable(sdma->clk_ipg);
1975	clk_disable(sdma->clk_ahb);
1976
1977	sdma_add_scripts(sdma, addr);
1978
1979	sdma->fw_loaded = true;
1980
1981	dev_info(sdma->dev, "loaded firmware %d.%d\n",
1982		 header->version_major,
1983		 header->version_minor);
1984
1985err_firmware:
1986	release_firmware(fw);
1987}
1988
1989#define EVENT_REMAP_CELLS 3
1990
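/*
 * Apply the optional "fsl,sdma-event-remap" property: each <reg shift val>
 * triplet updates one bit in the GPR syscon to remap a DMA event.
 */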
1991static int sdma_event_remap(struct sdma_engine *sdma)
1992{
1993	struct device_node *np = sdma->dev->of_node;
1994	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1995	struct property *event_remap;
1996	struct regmap *gpr;
1997	char propname[] = "fsl,sdma-event-remap";
1998	u32 reg, val, shift, num_map, i;
1999	int ret = 0;
2000
2001	if (IS_ERR(np) || !gpr_np)
2002		goto out;
2003
2004	event_remap = of_find_property(np, propname, NULL);
2005	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
2006	if (!num_map) {
2007		dev_dbg(sdma->dev, "no event needs to be remapped\n");
2008		goto out;
2009	} else if (num_map % EVENT_REMAP_CELLS) {
2010		dev_err(sdma->dev, "property %s length must be a multiple of %d\n",
2011				propname, EVENT_REMAP_CELLS);
2012		ret = -EINVAL;
2013		goto out;
2014	}
2015
2016	gpr = syscon_node_to_regmap(gpr_np);
2017	if (IS_ERR(gpr)) {
2018		dev_err(sdma->dev, "failed to get gpr regmap\n");
2019		ret = PTR_ERR(gpr);
2020		goto out;
2021	}
2022
2023	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
2024		ret = of_property_read_u32_index(np, propname, i, &reg);
2025		if (ret) {
2026			dev_err(sdma->dev, "failed to read property %s index %d\n",
2027					propname, i);
2028			goto out;
2029		}
2030
2031		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
2032		if (ret) {
2033			dev_err(sdma->dev, "failed to read property %s index %d\n",
2034					propname, i + 1);
2035			goto out;
2036		}
2037
2038		ret = of_property_read_u32_index(np, propname, i + 2, &val);
2039		if (ret) {
2040			dev_err(sdma->dev, "failed to read property %s index %d\n",
2041					propname, i + 2);
2042			goto out;
2043		}
2044
2045		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
2046	}
2047
2048out:
2049	if (gpr_np)
2050		of_node_put(gpr_np);
2051
2052	return ret;
2053}
2054
2055static int sdma_get_firmware(struct sdma_engine *sdma,
2056		const char *fw_name)
2057{
2058	int ret;
2059
2060	ret = request_firmware_nowait(THIS_MODULE,
2061			FW_ACTION_UEVENT, fw_name, sdma->dev,
2062			GFP_KERNEL, sdma, sdma_load_firmware);
2063
2064	return ret;
2065}
2066
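/*
 * One-time controller initialisation: enable the clocks, allocate the
 * channel control blocks plus the shared context buffer, clear all event
 * enables and channel priorities, set up command channel 0 and point
 * SDMA_H_C0PTR at the control block array.
 */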
2067static int sdma_init(struct sdma_engine *sdma)
2068{
2069	int i, ret;
2070	dma_addr_t ccb_phys;
2071
2072	ret = clk_enable(sdma->clk_ipg);
2073	if (ret)
2074		return ret;
2075	ret = clk_enable(sdma->clk_ahb);
2076	if (ret)
2077		goto disable_clk_ipg;
2078
2079	if (sdma->drvdata->check_ratio &&
2080	    (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
2081		sdma->clk_ratio = 1;
2082
2083	/* Be sure SDMA has not started yet */
2084	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
2085
2086	sdma->channel_control = dma_alloc_coherent(sdma->dev,
2087			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
2088			sizeof(struct sdma_context_data),
2089			&ccb_phys, GFP_KERNEL);
2090
2091	if (!sdma->channel_control) {
2092		ret = -ENOMEM;
2093		goto err_dma_alloc;
2094	}
2095
2096	sdma->context = (void *)sdma->channel_control +
2097		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
2098	sdma->context_phys = ccb_phys +
2099		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
2100
2101	/* disable all channels */
2102	for (i = 0; i < sdma->drvdata->num_events; i++)
2103		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
2104
2105	/* All channels have priority 0 */
2106	for (i = 0; i < MAX_DMA_CHANNELS; i++)
2107		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
2108
2109	ret = sdma_request_channel0(sdma);
2110	if (ret)
2111		goto err_dma_alloc;
2112
2113	sdma_config_ownership(&sdma->channel[0], false, true, false);
2114
2115	/* Set Command Channel (Channel Zero) */
2116	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
2117
2118	/* Set bits of CONFIG register but with static context switching */
2119	if (sdma->clk_ratio)
2120		writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
2121	else
2122		writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
2123
2124	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
2125
2126	/* Set channel 0 (command channel) to the highest priority */
2127	sdma_set_channel_priority(&sdma->channel[0], 7);
2128
2129	clk_disable(sdma->clk_ipg);
2130	clk_disable(sdma->clk_ahb);
2131
2132	return 0;
2133
2134err_dma_alloc:
2135	clk_disable(sdma->clk_ahb);
2136disable_clk_ipg:
2137	clk_disable(sdma->clk_ipg);
2138	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
2139	return ret;
2140}
2141
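/*
 * dmaengine filter callback: accept only general-purpose SDMA channels and
 * attach the i.MX-specific request data via chan->private.
 */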
2142static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
2143{
2144	struct sdma_channel *sdmac = to_sdma_chan(chan);
2145	struct imx_dma_data *data = fn_param;
2146
2147	if (!imx_dma_is_general_purpose(chan))
2148		return false;
2149
2150	sdmac->data = *data;
2151	chan->private = &sdmac->data;
2152
2153	return true;
2154}
2155
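/*
 * Translate a device tree DMA specifier (#dma-cells = <3>: request line,
 * peripheral type, priority) into a channel using sdma_filter_fn().
 */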
2156static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
2157				   struct of_dma *ofdma)
2158{
2159	struct sdma_engine *sdma = ofdma->of_dma_data;
2160	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
2161	struct imx_dma_data data;
2162
2163	if (dma_spec->args_count != 3)
2164		return NULL;
2165
2166	data.dma_request = dma_spec->args[0];
2167	data.peripheral_type = dma_spec->args[1];
2168	data.priority = dma_spec->args[2];
2169	/*
2170	 * Initialize dma_request2 to zero; it is not encoded in the device
2171	 * tree. For P2P, dma_request2 is set up via dma_request_channel():
2172	 * chan->private will point to the imx_dma_data, and in
2173	 * device_alloc_chan_resources() imx_dma_data.dma_request2 will be
2174	 * used as sdmac->event_id1.
2175	 */
2176	data.dma_request2 = 0;
2177
2178	return __dma_request_channel(&mask, sdma_filter_fn, &data,
2179				     ofdma->of_node);
2180}
2181
2182static int sdma_probe(struct platform_device *pdev)
2183{
2184	struct device_node *np = pdev->dev.of_node;
2185	struct device_node *spba_bus;
2186	const char *fw_name;
2187	int ret;
2188	int irq;
2189	struct resource spba_res;
2190	int i;
2191	struct sdma_engine *sdma;
2192	s32 *saddr_arr;
2193
2194	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2195	if (ret)
2196		return ret;
2197
2198	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
2199	if (!sdma)
2200		return -ENOMEM;
2201
2202	spin_lock_init(&sdma->channel_0_lock);
2203
2204	sdma->dev = &pdev->dev;
2205	sdma->drvdata = of_device_get_match_data(sdma->dev);
2206
2207	irq = platform_get_irq(pdev, 0);
2208	if (irq < 0)
2209		return irq;
2210
2211	sdma->regs = devm_platform_ioremap_resource(pdev, 0);
2212	if (IS_ERR(sdma->regs))
2213		return PTR_ERR(sdma->regs);
2214
2215	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2216	if (IS_ERR(sdma->clk_ipg))
2217		return PTR_ERR(sdma->clk_ipg);
2218
2219	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2220	if (IS_ERR(sdma->clk_ahb))
2221		return PTR_ERR(sdma->clk_ahb);
2222
2223	ret = clk_prepare(sdma->clk_ipg);
2224	if (ret)
2225		return ret;
2226
2227	ret = clk_prepare(sdma->clk_ahb);
2228	if (ret)
2229		goto err_clk;
2230
2231	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
2232				dev_name(&pdev->dev), sdma);
2233	if (ret)
2234		goto err_irq;
2235
2236	sdma->irq = irq;
2237
2238	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
2239	if (!sdma->script_addrs) {
2240		ret = -ENOMEM;
2241		goto err_irq;
2242	}
2243
2244	/* initially no scripts available */
2245	saddr_arr = (s32 *)sdma->script_addrs;
2246	for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
2247		saddr_arr[i] = -EINVAL;
2248
2249	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
2250	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
2251	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
2252	dma_cap_set(DMA_PRIVATE, sdma->dma_device.cap_mask);
2253
2254	INIT_LIST_HEAD(&sdma->dma_device.channels);
2255	/* Initialize channel parameters */
2256	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2257		struct sdma_channel *sdmac = &sdma->channel[i];
2258
2259		sdmac->sdma = sdma;
2260
2261		sdmac->channel = i;
2262		sdmac->vc.desc_free = sdma_desc_free;
2263		INIT_LIST_HEAD(&sdmac->terminated);
2264		INIT_WORK(&sdmac->terminate_worker,
2265				sdma_channel_terminate_work);
2266		/*
2267		 * Add the channel to the DMAC list. Do not add channel 0 though
2268		 * because we need it internally in the SDMA driver. This also means
2269		 * that channel 0 in dmaengine counting matches sdma channel 1.
2270		 */
2271		if (i)
2272			vchan_init(&sdmac->vc, &sdma->dma_device);
2273	}
2274
2275	ret = sdma_init(sdma);
2276	if (ret)
2277		goto err_init;
2278
2279	ret = sdma_event_remap(sdma);
2280	if (ret)
2281		goto err_init;
2282
2283	if (sdma->drvdata->script_addrs)
2284		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
2285
2286	sdma->dma_device.dev = &pdev->dev;
2287
2288	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
2289	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
2290	sdma->dma_device.device_tx_status = sdma_tx_status;
2291	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
2292	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
2293	sdma->dma_device.device_config = sdma_config;
2294	sdma->dma_device.device_terminate_all = sdma_terminate_all;
2295	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
2296	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
2297	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
2298	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
2299	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2300	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
2301	sdma->dma_device.device_issue_pending = sdma_issue_pending;
2302	sdma->dma_device.copy_align = 2;
2303	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
2304
2305	platform_set_drvdata(pdev, sdma);
2306
2307	ret = dma_async_device_register(&sdma->dma_device);
2308	if (ret) {
2309		dev_err(&pdev->dev, "unable to register\n");
2310		goto err_init;
2311	}
2312
2313	if (np) {
2314		ret = of_dma_controller_register(np, sdma_xlate, sdma);
2315		if (ret) {
2316			dev_err(&pdev->dev, "failed to register controller\n");
2317			goto err_register;
2318		}
2319
2320		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
2321		ret = of_address_to_resource(spba_bus, 0, &spba_res);
2322		if (!ret) {
2323			sdma->spba_start_addr = spba_res.start;
2324			sdma->spba_end_addr = spba_res.end;
2325		}
2326		of_node_put(spba_bus);
2327	}
2328
2329	/*
2330	 * Because the device tree does not encode the ROM script addresses,
2331	 * the RAM script in external firmware is mandatory for device tree
2332	 * probe; warn if it cannot be requested.
2333	 */
2334	ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2335				      &fw_name);
2336	if (ret) {
2337		dev_warn(&pdev->dev, "failed to get firmware name\n");
2338	} else {
2339		ret = sdma_get_firmware(sdma, fw_name);
2340		if (ret)
2341			dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2342	}
2343
2344	return 0;
2345
2346err_register:
2347	dma_async_device_unregister(&sdma->dma_device);
2348err_init:
2349	kfree(sdma->script_addrs);
2350err_irq:
2351	clk_unprepare(sdma->clk_ahb);
2352err_clk:
2353	clk_unprepare(sdma->clk_ipg);
2354	return ret;
2355}
2356
2357static void sdma_remove(struct platform_device *pdev)
2358{
2359	struct sdma_engine *sdma = platform_get_drvdata(pdev);
2360	int i;
2361
2362	devm_free_irq(&pdev->dev, sdma->irq, sdma);
2363	dma_async_device_unregister(&sdma->dma_device);
2364	kfree(sdma->script_addrs);
2365	clk_unprepare(sdma->clk_ahb);
2366	clk_unprepare(sdma->clk_ipg);
2367	/* Kill the tasklet */
2368	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2369		struct sdma_channel *sdmac = &sdma->channel[i];
2370
2371		tasklet_kill(&sdmac->vc.task);
2372		sdma_free_chan_resources(&sdmac->vc.chan);
2373	}
2374
2375	platform_set_drvdata(pdev, NULL);
2376}
2377
2378static struct platform_driver sdma_driver = {
2379	.driver		= {
2380		.name	= "imx-sdma",
2381		.of_match_table = sdma_dt_ids,
2382	},
2383	.remove_new	= sdma_remove,
2384	.probe		= sdma_probe,
2385};
2386
2387module_platform_driver(sdma_driver);
2388
2389MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
2390MODULE_DESCRIPTION("i.MX SDMA driver");
2391#if IS_ENABLED(CONFIG_SOC_IMX6Q)
2392MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
2393#endif
2394#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
2395MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
2396#endif
2397MODULE_LICENSE("GPL");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0+
   2//
   3// drivers/dma/imx-sdma.c
   4//
   5// This file contains a driver for the Freescale Smart DMA engine
   6//
   7// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
   8//
   9// Based on code from Freescale:
  10//
  11// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
  12
  13#include <linux/init.h>
  14#include <linux/iopoll.h>
  15#include <linux/module.h>
  16#include <linux/types.h>
  17#include <linux/bitfield.h>
  18#include <linux/bitops.h>
  19#include <linux/mm.h>
  20#include <linux/interrupt.h>
  21#include <linux/clk.h>
  22#include <linux/delay.h>
  23#include <linux/sched.h>
  24#include <linux/semaphore.h>
  25#include <linux/spinlock.h>
  26#include <linux/device.h>
  27#include <linux/genalloc.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/firmware.h>
  30#include <linux/slab.h>
  31#include <linux/platform_device.h>
  32#include <linux/dmaengine.h>
  33#include <linux/of.h>
  34#include <linux/of_address.h>
  35#include <linux/of_dma.h>
  36#include <linux/workqueue.h>
  37
  38#include <asm/irq.h>
  39#include <linux/dma/imx-dma.h>
  40#include <linux/regmap.h>
  41#include <linux/mfd/syscon.h>
  42#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  43
  44#include "dmaengine.h"
  45#include "virt-dma.h"
  46
  47/* SDMA registers */
  48#define SDMA_H_C0PTR		0x000
  49#define SDMA_H_INTR		0x004
  50#define SDMA_H_STATSTOP		0x008
  51#define SDMA_H_START		0x00c
  52#define SDMA_H_EVTOVR		0x010
  53#define SDMA_H_DSPOVR		0x014
  54#define SDMA_H_HOSTOVR		0x018
  55#define SDMA_H_EVTPEND		0x01c
  56#define SDMA_H_DSPENBL		0x020
  57#define SDMA_H_RESET		0x024
  58#define SDMA_H_EVTERR		0x028
  59#define SDMA_H_INTRMSK		0x02c
  60#define SDMA_H_PSW		0x030
  61#define SDMA_H_EVTERRDBG	0x034
  62#define SDMA_H_CONFIG		0x038
  63#define SDMA_ONCE_ENB		0x040
  64#define SDMA_ONCE_DATA		0x044
  65#define SDMA_ONCE_INSTR		0x048
  66#define SDMA_ONCE_STAT		0x04c
  67#define SDMA_ONCE_CMD		0x050
  68#define SDMA_EVT_MIRROR		0x054
  69#define SDMA_ILLINSTADDR	0x058
  70#define SDMA_CHN0ADDR		0x05c
  71#define SDMA_ONCE_RTB		0x060
  72#define SDMA_XTRIG_CONF1	0x070
  73#define SDMA_XTRIG_CONF2	0x074
  74#define SDMA_CHNENBL0_IMX35	0x200
  75#define SDMA_CHNENBL0_IMX31	0x080
  76#define SDMA_CHNPRI_0		0x100
  77#define SDMA_DONE0_CONFIG	0x1000
  78
  79/*
  80 * Buffer descriptor status values.
  81 */
  82#define BD_DONE  0x01
  83#define BD_WRAP  0x02
  84#define BD_CONT  0x04
  85#define BD_INTR  0x08
  86#define BD_RROR  0x10
  87#define BD_LAST  0x20
  88#define BD_EXTD  0x80
  89
  90/*
  91 * Data Node descriptor status values.
  92 */
  93#define DND_END_OF_FRAME  0x80
  94#define DND_END_OF_XFER   0x40
  95#define DND_DONE          0x20
  96#define DND_UNUSED        0x01
  97
  98/*
  99 * IPCV2 descriptor status values.
 100 */
 101#define BD_IPCV2_END_OF_FRAME  0x40
 102
 103#define IPCV2_MAX_NODES        50
 104/*
 105 * Error bit set in the CCB status field by the SDMA,
 106 * in setbd routine, in case of a transfer error
 107 */
 108#define DATA_ERROR  0x10000000
 109
 110/*
 111 * Buffer descriptor commands.
 112 */
 113#define C0_ADDR             0x01
 114#define C0_LOAD             0x02
 115#define C0_DUMP             0x03
 116#define C0_SETCTX           0x07
 117#define C0_GETCTX           0x03
 118#define C0_SETDM            0x01
 119#define C0_SETPM            0x04
 120#define C0_GETDM            0x02
 121#define C0_GETPM            0x08
 122/*
 123 * Change endianness indicator in the BD command field
 124 */
 125#define CHANGE_ENDIANNESS   0x80
 126
 127/*
 128 *  p_2_p watermark_level description
 129 *	Bits		Name			Description
 130 *	0-7		Lower WML		Lower watermark level
 131 *	8		PS			1: Pad Swallowing
 132 *						0: No Pad Swallowing
 133 *	9		PA			1: Pad Adding
 134 *						0: No Pad Adding
 135 *	10		SPDIF			If this bit is set both source
 136 *						and destination are on SPBA
 137 *	11		Source Bit(SP)		1: Source on SPBA
 138 *						0: Source on AIPS
 139 *	12		Destination Bit(DP)	1: Destination on SPBA
 140 *						0: Destination on AIPS
 141 *	13		Source FIFO		1: Source is dual FIFO
 142 *						0: Source is single FIFO
 143 *	14		Destination FIFO	1: Destination is dual FIFO
 144 *						0: Destination is single FIFO
 145 *	15		---------		MUST BE 0
 146 *	16-23		Higher WML		HWML
 147 *	24-27		N			Total number of samples after
 148 *						which Pad adding/Swallowing
 149 *						must be done. It must be odd.
 150 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 151 *						LWML event mask
 152 *						0: LWE in EVENTS register
 153 *						1: LWE in EVENTS2 register
 154 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 155 *						HWML event mask
 156 *						0: HWE in EVENTS register
 157 *						1: HWE in EVENTS2 register
 158 *	30		---------		MUST BE 0
 159 *	31		CONT			1: Amount of samples to be
 160 *						transferred is unknown and
 161 *						script will keep on
 162 *						transferring samples as long as
 163 *						both events are detected and
 164 *						script must be manually stopped
 165 *						by the application
 166 *						0: The amount of samples to be
 167 *						transferred is equal to the
 168 *						count field of mode word
 169 */
 170#define SDMA_WATERMARK_LEVEL_LWML	0xFF
 171#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
 172#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
 173#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
 174#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
 175#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
 176#define SDMA_WATERMARK_LEVEL_SD		BIT(13)
 177#define SDMA_WATERMARK_LEVEL_DD		BIT(14)
 178#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
 179#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
 180#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
 181#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
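/*
 * For illustration (hypothetical values): a peripheral-to-peripheral setup
 * with a lower watermark of 4, a higher watermark of 8, both ends on SPBA
 * and continuous transfer would end up with a watermark word of roughly
 * (4 | (8 << 16) | SDMA_WATERMARK_LEVEL_SP | SDMA_WATERMARK_LEVEL_DP |
 *  SDMA_WATERMARK_LEVEL_CONT); see sdma_set_watermarklevel_for_p2p().
 */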
 182
 183#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 184				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 185				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
 186				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 187
 188#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
 189				 BIT(DMA_MEM_TO_DEV) | \
 190				 BIT(DMA_DEV_TO_DEV))
 191
 192#define SDMA_WATERMARK_LEVEL_N_FIFOS	GENMASK(15, 12)
 193#define SDMA_WATERMARK_LEVEL_OFF_FIFOS  GENMASK(19, 16)
 194#define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO   GENMASK(31, 28)
 195#define SDMA_WATERMARK_LEVEL_SW_DONE	BIT(23)
 196
 197#define SDMA_DONE0_CONFIG_DONE_SEL	BIT(7)
 198#define SDMA_DONE0_CONFIG_DONE_DIS	BIT(6)
 199
 200/*
 201 * struct sdma_script_start_addrs - SDMA script start pointers
 202 *
 203 * start addresses of the different functions in the physical
 204 * address space of the SDMA engine.
 205 */
 206struct sdma_script_start_addrs {
 207	s32 ap_2_ap_addr;
 208	s32 ap_2_bp_addr;
 209	s32 ap_2_ap_fixed_addr;
 210	s32 bp_2_ap_addr;
 211	s32 loopback_on_dsp_side_addr;
 212	s32 mcu_interrupt_only_addr;
 213	s32 firi_2_per_addr;
 214	s32 firi_2_mcu_addr;
 215	s32 per_2_firi_addr;
 216	s32 mcu_2_firi_addr;
 217	s32 uart_2_per_addr;
 218	s32 uart_2_mcu_addr;
 219	s32 per_2_app_addr;
 220	s32 mcu_2_app_addr;
 221	s32 per_2_per_addr;
 222	s32 uartsh_2_per_addr;
 223	s32 uartsh_2_mcu_addr;
 224	s32 per_2_shp_addr;
 225	s32 mcu_2_shp_addr;
 226	s32 ata_2_mcu_addr;
 227	s32 mcu_2_ata_addr;
 228	s32 app_2_per_addr;
 229	s32 app_2_mcu_addr;
 230	s32 shp_2_per_addr;
 231	s32 shp_2_mcu_addr;
 232	s32 mshc_2_mcu_addr;
 233	s32 mcu_2_mshc_addr;
 234	s32 spdif_2_mcu_addr;
 235	s32 mcu_2_spdif_addr;
 236	s32 asrc_2_mcu_addr;
 237	s32 ext_mem_2_ipu_addr;
 238	s32 descrambler_addr;
 239	s32 dptc_dvfs_addr;
 240	s32 utra_addr;
 241	s32 ram_code_start_addr;
 242	/* End of v1 array */
 243	union {	s32 v1_end; s32 mcu_2_ssish_addr; };
 244	s32 ssish_2_mcu_addr;
 245	s32 hdmi_dma_addr;
 246	/* End of v2 array */
 247	union { s32 v2_end; s32 zcanfd_2_mcu_addr; };
 248	s32 zqspi_2_mcu_addr;
 249	s32 mcu_2_ecspi_addr;
 250	s32 mcu_2_sai_addr;
 251	s32 sai_2_mcu_addr;
 252	s32 uart_2_mcu_rom_addr;
 253	s32 uartsh_2_mcu_rom_addr;
 254	s32 i2c_2_mcu_addr;
 255	s32 mcu_2_i2c_addr;
 256	/* End of v3 array */
 257	union { s32 v3_end; s32 mcu_2_zqspi_addr; };
 258	/* End of v4 array */
 259	s32 v4_end[0];
 260};
 261
 262/*
 263 * Mode/Count of data node descriptors - IPCv2
 264 */
 265struct sdma_mode_count {
 266#define SDMA_BD_MAX_CNT	0xffff
 267	u32 count   : 16; /* size of the buffer pointed by this BD */
 268	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
 269	u32 command :  8; /* command mostly used for channel 0 */
 270};
 271
 272/*
 273 * Buffer descriptor
 274 */
 275struct sdma_buffer_descriptor {
 276	struct sdma_mode_count  mode;
 277	u32 buffer_addr;	/* address of the buffer described */
 278	u32 ext_buffer_addr;	/* extended buffer address */
 279} __attribute__ ((packed));
 280
 281/**
 282 * struct sdma_channel_control - Channel control Block
 283 *
 284 * @current_bd_ptr:	current buffer descriptor processed
 285 * @base_bd_ptr:	first element of buffer descriptor array
 286 * @unused:		padding. The SDMA engine expects an array of 128 byte
 287 *			control blocks
 288 */
 289struct sdma_channel_control {
 290	u32 current_bd_ptr;
 291	u32 base_bd_ptr;
 292	u32 unused[2];
 293} __attribute__ ((packed));
 294
 295/**
 296 * struct sdma_state_registers - SDMA context for a channel
 297 *
 298 * @pc:		program counter
 299 * @unused1:	unused
 300 * @t:		test bit: status of arithmetic & test instruction
 301 * @rpc:	return program counter
 302 * @unused0:	unused
 303 * @sf:		source fault while loading data
 304 * @spc:	loop start program counter
 305 * @unused2:	unused
 306 * @df:		destination fault while storing data
 307 * @epc:	loop end program counter
 308 * @lm:		loop mode
 309 */
 310struct sdma_state_registers {
 311	u32 pc     :14;
 312	u32 unused1: 1;
 313	u32 t      : 1;
 314	u32 rpc    :14;
 315	u32 unused0: 1;
 316	u32 sf     : 1;
 317	u32 spc    :14;
 318	u32 unused2: 1;
 319	u32 df     : 1;
 320	u32 epc    :14;
 321	u32 lm     : 2;
 322} __attribute__ ((packed));
 323
 324/**
 325 * struct sdma_context_data - sdma context specific to a channel
 326 *
 327 * @channel_state:	channel state bits
 328 * @gReg:		general registers
 329 * @mda:		burst dma destination address register
 330 * @msa:		burst dma source address register
 331 * @ms:			burst dma status register
 332 * @md:			burst dma data register
 333 * @pda:		peripheral dma destination address register
 334 * @psa:		peripheral dma source address register
 335 * @ps:			peripheral dma status register
 336 * @pd:			peripheral dma data register
 337 * @ca:			CRC polynomial register
 338 * @cs:			CRC accumulator register
 339 * @dda:		dedicated core destination address register
 340 * @dsa:		dedicated core source address register
 341 * @ds:			dedicated core status register
 342 * @dd:			dedicated core data register
 343 * @scratch0:		1st word of dedicated ram for context switch
 344 * @scratch1:		2nd word of dedicated ram for context switch
 345 * @scratch2:		3rd word of dedicated ram for context switch
 346 * @scratch3:		4th word of dedicated ram for context switch
 347 * @scratch4:		5th word of dedicated ram for context switch
 348 * @scratch5:		6th word of dedicated ram for context switch
 349 * @scratch6:		7th word of dedicated ram for context switch
 350 * @scratch7:		8th word of dedicated ram for context switch
 351 */
 352struct sdma_context_data {
 353	struct sdma_state_registers  channel_state;
 354	u32  gReg[8];
 355	u32  mda;
 356	u32  msa;
 357	u32  ms;
 358	u32  md;
 359	u32  pda;
 360	u32  psa;
 361	u32  ps;
 362	u32  pd;
 363	u32  ca;
 364	u32  cs;
 365	u32  dda;
 366	u32  dsa;
 367	u32  ds;
 368	u32  dd;
 369	u32  scratch0;
 370	u32  scratch1;
 371	u32  scratch2;
 372	u32  scratch3;
 373	u32  scratch4;
 374	u32  scratch5;
 375	u32  scratch6;
 376	u32  scratch7;
 377} __attribute__ ((packed));
 378
 379
 380struct sdma_engine;
 381
 382/**
  383 * struct sdma_desc - descriptor structure for one transfer
  384 * @vd:			descriptor for virt dma
  385 * @num_bd:		number of buffer descriptors for this transfer
 386 * @bd_phys:		physical address of bd
 387 * @buf_tail:		ID of the buffer that was processed
 388 * @buf_ptail:		ID of the previous buffer that was processed
 389 * @period_len:		period length, used in cyclic.
 390 * @chn_real_count:	the real count updated from bd->mode.count
 391 * @chn_count:		the transfer count set
 392 * @sdmac:		sdma_channel pointer
 393 * @bd:			pointer of allocate bd
 394 */
 395struct sdma_desc {
 396	struct virt_dma_desc	vd;
 397	unsigned int		num_bd;
 398	dma_addr_t		bd_phys;
 399	unsigned int		buf_tail;
 400	unsigned int		buf_ptail;
 401	unsigned int		period_len;
 402	unsigned int		chn_real_count;
 403	unsigned int		chn_count;
 404	struct sdma_channel	*sdmac;
 405	struct sdma_buffer_descriptor *bd;
 406};
 407
 408/**
 409 * struct sdma_channel - housekeeping for a SDMA channel
 410 *
 411 * @vc:			virt_dma base structure
 412 * @desc:		sdma description including vd and other special member
 413 * @sdma:		pointer to the SDMA engine for this channel
 414 * @channel:		the channel number, matches dmaengine chan_id + 1
 415 * @direction:		transfer type. Needed for setting SDMA script
 416 * @slave_config:	Slave configuration
 417 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
 418 * @event_id0:		aka dma request line
 419 * @event_id1:		for channels that use 2 events
 420 * @word_size:		peripheral access size
 421 * @pc_from_device:	script address for those device_2_memory
 422 * @pc_to_device:	script address for those memory_2_device
 423 * @device_to_device:	script address for those device_2_device
 424 * @pc_to_pc:		script address for those memory_2_memory
 425 * @flags:		loop mode or not
 426 * @per_address:	peripheral source or destination address in common case
 427 *                      destination address in p_2_p case
 428 * @per_address2:	peripheral source address in p_2_p case
 429 * @event_mask:		event mask used in p_2_p script
 430 * @watermark_level:	value for gReg[7], some script will extend it from
 431 *			basic watermark such as p_2_p
 432 * @shp_addr:		value for gReg[6]
 433 * @per_addr:		value for gReg[2]
 434 * @status:		status of dma channel
 435 * @data:		specific sdma interface structure
 436 * @terminate_worker:	used to call back into terminate work function
 437 * @terminated:		terminated list
 438 * @is_ram_script:	flag for script in ram
 439 * @n_fifos_src:	number of source device fifos
 440 * @n_fifos_dst:	number of destination device fifos
 441 * @sw_done:		software done flag
 442 * @stride_fifos_src:	stride for source device FIFOs
 443 * @stride_fifos_dst:	stride for destination device FIFOs
 444 * @words_per_fifo:	copy number of words one time for one FIFO
 445 */
 446struct sdma_channel {
 447	struct virt_dma_chan		vc;
 448	struct sdma_desc		*desc;
 449	struct sdma_engine		*sdma;
 450	unsigned int			channel;
 451	enum dma_transfer_direction		direction;
 452	struct dma_slave_config		slave_config;
 453	enum sdma_peripheral_type	peripheral_type;
 454	unsigned int			event_id0;
 455	unsigned int			event_id1;
 456	enum dma_slave_buswidth		word_size;
 457	unsigned int			pc_from_device, pc_to_device;
 458	unsigned int			device_to_device;
 459	unsigned int                    pc_to_pc;
 460	unsigned long			flags;
 461	dma_addr_t			per_address, per_address2;
 462	unsigned long			event_mask[2];
 463	unsigned long			watermark_level;
 464	u32				shp_addr, per_addr;
 465	enum dma_status			status;
 466	struct imx_dma_data		data;
 467	struct work_struct		terminate_worker;
 468	struct list_head                terminated;
 469	bool				is_ram_script;
 470	unsigned int			n_fifos_src;
 471	unsigned int			n_fifos_dst;
 472	unsigned int			stride_fifos_src;
 473	unsigned int			stride_fifos_dst;
 474	unsigned int			words_per_fifo;
 475	bool				sw_done;
 476};
 477
 478#define IMX_DMA_SG_LOOP		BIT(0)
 479
 480#define MAX_DMA_CHANNELS 32
 481#define MXC_SDMA_DEFAULT_PRIORITY 1
 482#define MXC_SDMA_MIN_PRIORITY 1
 483#define MXC_SDMA_MAX_PRIORITY 7
 484
 485#define SDMA_FIRMWARE_MAGIC 0x414d4453
 486
 487/**
 488 * struct sdma_firmware_header - Layout of the firmware image
 489 *
 490 * @magic:		"SDMA"
 491 * @version_major:	increased whenever layout of struct
 492 *			sdma_script_start_addrs changes.
 493 * @version_minor:	firmware minor version (for binary compatible changes)
 494 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 495 * @num_script_addrs:	Number of script addresses in this image
 496 * @ram_code_start:	offset of SDMA ram image in this firmware image
 497 * @ram_code_size:	size of SDMA ram image
 498 */
 499struct sdma_firmware_header {
 500	u32	magic;
 501	u32	version_major;
 502	u32	version_minor;
 503	u32	script_addrs_start;
 504	u32	num_script_addrs;
 505	u32	ram_code_start;
 506	u32	ram_code_size;
 507};
 508
 509struct sdma_driver_data {
 510	int chnenbl0;
 511	int num_events;
 512	struct sdma_script_start_addrs	*script_addrs;
 513	bool check_ratio;
 514	/*
 515	 * ecspi ERR009165 fixed should be done in sdma script
 516	 * and it has been fixed in soc from i.mx6ul.
 517	 * please get more information from the below link:
 518	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
 519	 */
 520	bool ecspi_fixed;
 521};
 522
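/*
 * struct sdma_engine - per-controller state: the channels, register
 * mapping, clocks, loaded script addresses and the channel 0 resources
 * shared by all channels.
 */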
 523struct sdma_engine {
 524	struct device			*dev;
 525	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 526	struct sdma_channel_control	*channel_control;
 527	void __iomem			*regs;
 528	struct sdma_context_data	*context;
 529	dma_addr_t			context_phys;
 530	struct dma_device		dma_device;
 531	struct clk			*clk_ipg;
 532	struct clk			*clk_ahb;
 533	spinlock_t			channel_0_lock;
 534	u32				script_number;
 535	struct sdma_script_start_addrs	*script_addrs;
 536	const struct sdma_driver_data	*drvdata;
 537	u32				spba_start_addr;
 538	u32				spba_end_addr;
 539	unsigned int			irq;
 540	dma_addr_t			bd0_phys;
 541	struct sdma_buffer_descriptor	*bd0;
 542	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
 543	bool				clk_ratio;
 544	bool                            fw_loaded;
 545	struct gen_pool			*iram_pool;
 546};
 547
 548static int sdma_config_write(struct dma_chan *chan,
 549		       struct dma_slave_config *dmaengine_cfg,
 550		       enum dma_transfer_direction direction);
 551
 552static struct sdma_driver_data sdma_imx31 = {
 553	.chnenbl0 = SDMA_CHNENBL0_IMX31,
 554	.num_events = 32,
 555};
 556
 557static struct sdma_script_start_addrs sdma_script_imx25 = {
 558	.ap_2_ap_addr = 729,
 559	.uart_2_mcu_addr = 904,
 560	.per_2_app_addr = 1255,
 561	.mcu_2_app_addr = 834,
 562	.uartsh_2_mcu_addr = 1120,
 563	.per_2_shp_addr = 1329,
 564	.mcu_2_shp_addr = 1048,
 565	.ata_2_mcu_addr = 1560,
 566	.mcu_2_ata_addr = 1479,
 567	.app_2_per_addr = 1189,
 568	.app_2_mcu_addr = 770,
 569	.shp_2_per_addr = 1407,
 570	.shp_2_mcu_addr = 979,
 571};
 572
 573static struct sdma_driver_data sdma_imx25 = {
 574	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 575	.num_events = 48,
 576	.script_addrs = &sdma_script_imx25,
 577};
 578
 579static struct sdma_driver_data sdma_imx35 = {
 580	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 581	.num_events = 48,
 582};
 583
 584static struct sdma_script_start_addrs sdma_script_imx51 = {
 585	.ap_2_ap_addr = 642,
 586	.uart_2_mcu_addr = 817,
 587	.mcu_2_app_addr = 747,
 588	.mcu_2_shp_addr = 961,
 589	.ata_2_mcu_addr = 1473,
 590	.mcu_2_ata_addr = 1392,
 591	.app_2_per_addr = 1033,
 592	.app_2_mcu_addr = 683,
 593	.shp_2_per_addr = 1251,
 594	.shp_2_mcu_addr = 892,
 595};
 596
 597static struct sdma_driver_data sdma_imx51 = {
 598	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 599	.num_events = 48,
 600	.script_addrs = &sdma_script_imx51,
 601};
 602
 603static struct sdma_script_start_addrs sdma_script_imx53 = {
 604	.ap_2_ap_addr = 642,
 605	.app_2_mcu_addr = 683,
 606	.mcu_2_app_addr = 747,
 607	.uart_2_mcu_addr = 817,
 608	.shp_2_mcu_addr = 891,
 609	.mcu_2_shp_addr = 960,
 610	.uartsh_2_mcu_addr = 1032,
 611	.spdif_2_mcu_addr = 1100,
 612	.mcu_2_spdif_addr = 1134,
 613	.firi_2_mcu_addr = 1193,
 614	.mcu_2_firi_addr = 1290,
 615};
 616
 617static struct sdma_driver_data sdma_imx53 = {
 618	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 619	.num_events = 48,
 620	.script_addrs = &sdma_script_imx53,
 621};
 622
 623static struct sdma_script_start_addrs sdma_script_imx6q = {
 624	.ap_2_ap_addr = 642,
 625	.uart_2_mcu_addr = 817,
 626	.mcu_2_app_addr = 747,
 627	.per_2_per_addr = 6331,
 628	.uartsh_2_mcu_addr = 1032,
 629	.mcu_2_shp_addr = 960,
 630	.app_2_mcu_addr = 683,
 631	.shp_2_mcu_addr = 891,
 632	.spdif_2_mcu_addr = 1100,
 633	.mcu_2_spdif_addr = 1134,
 634};
 635
 636static struct sdma_driver_data sdma_imx6q = {
 637	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 638	.num_events = 48,
 639	.script_addrs = &sdma_script_imx6q,
 640};
 641
 642static struct sdma_driver_data sdma_imx6ul = {
 643	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 644	.num_events = 48,
 645	.script_addrs = &sdma_script_imx6q,
 646	.ecspi_fixed = true,
 647};
 648
 649static struct sdma_script_start_addrs sdma_script_imx7d = {
 650	.ap_2_ap_addr = 644,
 651	.uart_2_mcu_addr = 819,
 652	.mcu_2_app_addr = 749,
 653	.uartsh_2_mcu_addr = 1034,
 654	.mcu_2_shp_addr = 962,
 655	.app_2_mcu_addr = 685,
 656	.shp_2_mcu_addr = 893,
 657	.spdif_2_mcu_addr = 1102,
 658	.mcu_2_spdif_addr = 1136,
 659};
 660
 661static struct sdma_driver_data sdma_imx7d = {
 662	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 663	.num_events = 48,
 664	.script_addrs = &sdma_script_imx7d,
 665};
 666
 667static struct sdma_driver_data sdma_imx8mq = {
 668	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 669	.num_events = 48,
 670	.script_addrs = &sdma_script_imx7d,
 671	.check_ratio = 1,
 672};
 673
 674static const struct of_device_id sdma_dt_ids[] = {
 675	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
 676	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
 677	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
 678	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
 679	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 680	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 681	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
 682	{ .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
 683	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
 684	{ /* sentinel */ }
 685};
 686MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 687
 688#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
 689#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
 690#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
 691#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected*/
 692
 693static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 694{
 695	u32 chnenbl0 = sdma->drvdata->chnenbl0;
 696	return chnenbl0 + event * 4;
 697}
 698
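/*
 * Configure which masters (external event, MCU host, DSP) may control this
 * channel: when an override is requested the channel's bit is cleared in
 * the corresponding EVTOVR/HOSTOVR/DSPOVR register, otherwise it is set.
 */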
 699static int sdma_config_ownership(struct sdma_channel *sdmac,
 700		bool event_override, bool mcu_override, bool dsp_override)
 701{
 702	struct sdma_engine *sdma = sdmac->sdma;
 703	int channel = sdmac->channel;
 704	unsigned long evt, mcu, dsp;
 705
 706	if (event_override && mcu_override && dsp_override)
 707		return -EINVAL;
 708
 709	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
 710	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
 711	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
 712
 713	if (dsp_override)
 714		__clear_bit(channel, &dsp);
 715	else
 716		__set_bit(channel, &dsp);
 717
 718	if (event_override)
 719		__clear_bit(channel, &evt);
 720	else
 721		__set_bit(channel, &evt);
 722
 723	if (mcu_override)
 724		__clear_bit(channel, &mcu);
 725	else
 726		__set_bit(channel, &mcu);
 727
 728	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
 729	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
 730	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
 731
 732	return 0;
 733}
 734
 735static int is_sdma_channel_enabled(struct sdma_engine *sdma, int channel)
 736{
 737	return !!(readl(sdma->regs + SDMA_H_STATSTOP) & BIT(channel));
 738}
 739
 740static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 741{
 742	writel(BIT(channel), sdma->regs + SDMA_H_START);
 743}
 744
 745/*
 746 * sdma_run_channel0 - run a channel and wait till it's done
 747 */
 748static int sdma_run_channel0(struct sdma_engine *sdma)
 749{
 750	int ret;
 751	u32 reg;
 752
 753	sdma_enable_channel(sdma, 0);
 754
 755	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
 756						reg, !(reg & 1), 1, 500);
 757	if (ret)
 758		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 759
 760	/* Set bits of CONFIG register with dynamic context switching */
 761	reg = readl(sdma->regs + SDMA_H_CONFIG);
 762	if ((reg & SDMA_H_CONFIG_CSM) == 0) {
 763		reg |= SDMA_H_CONFIG_CSM;
 764		writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
 765	}
 766
 767	return ret;
 768}
 769
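/*
 * Load a script image into SDMA program memory: copy the buffer into a
 * DMA-coherent bounce buffer and issue a C0_SETPM command on channel 0
 * (the count is in 16-bit instruction words, hence size / 2).
 */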
 770static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 771		u32 address)
 772{
 773	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 774	void *buf_virt;
 775	dma_addr_t buf_phys;
 776	int ret;
 777	unsigned long flags;
 778
 779	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
 780	if (!buf_virt)
 781		return -ENOMEM;
 782
 783	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 784
 785	bd0->mode.command = C0_SETPM;
 786	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
 787	bd0->mode.count = size / 2;
 788	bd0->buffer_addr = buf_phys;
 789	bd0->ext_buffer_addr = address;
 790
 791	memcpy(buf_virt, buf, size);
 792
 793	ret = sdma_run_channel0(sdma);
 794
 795	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 796
 797	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
 798
 799	return ret;
 800}
 801
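/*
 * Route a DMA request (event) to this channel by setting the channel's bit
 * in the per-event channel enable register; optionally configure the
 * software-done mechanism via SDMA_DONE0_CONFIG.
 */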
 802static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 803{
 804	struct sdma_engine *sdma = sdmac->sdma;
 805	int channel = sdmac->channel;
 806	unsigned long val;
 807	u32 chnenbl = chnenbl_ofs(sdma, event);
 808
 809	val = readl_relaxed(sdma->regs + chnenbl);
 810	__set_bit(channel, &val);
 811	writel_relaxed(val, sdma->regs + chnenbl);
 812
 813	/* Set SDMA_DONE0_CONFIG if sw_done is enabled */
 814	if (sdmac->sw_done) {
 815		val = readl_relaxed(sdma->regs + SDMA_DONE0_CONFIG);
 816		val |= SDMA_DONE0_CONFIG_DONE_SEL;
 817		val &= ~SDMA_DONE0_CONFIG_DONE_DIS;
 818		writel_relaxed(val, sdma->regs + SDMA_DONE0_CONFIG);
 819	}
 820}
 821
 822static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 823{
 824	struct sdma_engine *sdma = sdmac->sdma;
 825	int channel = sdmac->channel;
 826	u32 chnenbl = chnenbl_ofs(sdma, event);
 827	unsigned long val;
 828
 829	val = readl_relaxed(sdma->regs + chnenbl);
 830	__clear_bit(channel, &val);
 831	writel_relaxed(val, sdma->regs + chnenbl);
 832}
 833
 834static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
 835{
 836	return container_of(t, struct sdma_desc, vd.tx);
 837}
 838
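/*
 * Take the next queued descriptor, point the channel control block at its
 * buffer descriptor list and start the channel. Called with the channel
 * lock held.
 */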
 839static void sdma_start_desc(struct sdma_channel *sdmac)
 840{
 841	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
 842	struct sdma_desc *desc;
 843	struct sdma_engine *sdma = sdmac->sdma;
 844	int channel = sdmac->channel;
 845
 846	if (!vd) {
 847		sdmac->desc = NULL;
 848		return;
 849	}
 850	sdmac->desc = desc = to_sdma_desc(&vd->tx);
 851
 852	list_del(&vd->node);
 853
 854	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 855	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
 856	sdma_enable_channel(sdma, sdmac->channel);
 857}
 858
 859static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 860{
 861	struct sdma_buffer_descriptor *bd;
 862	int error = 0;
 863	enum dma_status	old_status = sdmac->status;
 864
 865	/*
 866	 * loop mode. Iterate over descriptors, re-setup them and
 867	 * call callback function.
 868	 */
 869	while (sdmac->desc) {
 870		struct sdma_desc *desc = sdmac->desc;
 871
 872		bd = &desc->bd[desc->buf_tail];
 873
 874		if (bd->mode.status & BD_DONE)
 875			break;
 876
 877		if (bd->mode.status & BD_RROR) {
 878			bd->mode.status &= ~BD_RROR;
 879			sdmac->status = DMA_ERROR;
 880			error = -EIO;
 881		}
 882
  883		/*
  884		 * We use bd->mode.count to calculate the residue, since it
  885		 * contains the number of bytes present in the current buffer
  886		 * descriptor.
  887		 */
 887
 888		desc->chn_real_count = bd->mode.count;
 889		bd->mode.count = desc->period_len;
 890		desc->buf_ptail = desc->buf_tail;
 891		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
 892
 893		/*
 894		 * The callback is called from the interrupt context in order
 895		 * to reduce latency and to avoid the risk of altering the
 896		 * SDMA transaction status by the time the client tasklet is
 897		 * executed.
 898		 */
 899		spin_unlock(&sdmac->vc.lock);
 900		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
 901		spin_lock(&sdmac->vc.lock);
 902
 903		/* Assign buffer ownership to SDMA */
 904		bd->mode.status |= BD_DONE;
 905
 906		if (error)
 907			sdmac->status = old_status;
 908	}
 909
 910	/*
 911	 * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
 912	 * owned buffer is available (i.e. BD_DONE was set too late).
 913	 */
 914	if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
 915		dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
 916		sdma_enable_channel(sdmac->sdma, sdmac->channel);
 917	}
 918}
 919
 920static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 921{
 922	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 923	struct sdma_buffer_descriptor *bd;
 924	int i, error = 0;
 925
 926	sdmac->desc->chn_real_count = 0;
 927	/*
 928	 * non loop mode. Iterate over all descriptors, collect
 929	 * errors and call callback function
 930	 */
 931	for (i = 0; i < sdmac->desc->num_bd; i++) {
 932		bd = &sdmac->desc->bd[i];
 933
 934		if (bd->mode.status & (BD_DONE | BD_RROR))
 935			error = -EIO;
 936		sdmac->desc->chn_real_count += bd->mode.count;
 937	}
 938
 939	if (error)
 940		sdmac->status = DMA_ERROR;
 941	else
 942		sdmac->status = DMA_COMPLETE;
 943}
 944
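/*
 * Interrupt handler: acknowledge all pending channel interrupts, then for
 * each channel either update the cyclic loop, fire the cyclic callback
 * (HDMI) or complete the descriptor and start the next one.
 */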
 945static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 946{
 947	struct sdma_engine *sdma = dev_id;
 948	unsigned long stat;
 949
 950	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
 951	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 952	/* channel 0 is special and not handled here, see run_channel0() */
 953	stat &= ~1;
 954
 955	while (stat) {
 956		int channel = fls(stat) - 1;
 957		struct sdma_channel *sdmac = &sdma->channel[channel];
 958		struct sdma_desc *desc;
 959
 960		spin_lock(&sdmac->vc.lock);
 961		desc = sdmac->desc;
 962		if (desc) {
 963			if (sdmac->flags & IMX_DMA_SG_LOOP) {
 964				if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
 965					sdma_update_channel_loop(sdmac);
 966				else
 967					vchan_cyclic_callback(&desc->vd);
 968			} else {
 969				mxc_sdma_handle_channel_normal(sdmac);
 970				vchan_cookie_complete(&desc->vd);
 971				sdma_start_desc(sdmac);
 972			}
 973		}
 974
 975		spin_unlock(&sdmac->vc.lock);
 976		__clear_bit(channel, &stat);
 977	}
 978
 979	return IRQ_HANDLED;
 980}
 981
 982/*
 983 * sets the pc of SDMA script according to the peripheral type
 984 */
 985static int sdma_get_pc(struct sdma_channel *sdmac,
 986		enum sdma_peripheral_type peripheral_type)
 987{
 988	struct sdma_engine *sdma = sdmac->sdma;
 989	int per_2_emi = 0, emi_2_per = 0;
 990	/*
 991	 * These are needed once we start to support transfers between
 992	 * two peripherals or memory-to-memory transfers
 993	 */
 994	int per_2_per = 0, emi_2_emi = 0;
 995
 996	sdmac->pc_from_device = 0;
 997	sdmac->pc_to_device = 0;
 998	sdmac->device_to_device = 0;
 999	sdmac->pc_to_pc = 0;
1000	sdmac->is_ram_script = false;
1001
1002	switch (peripheral_type) {
1003	case IMX_DMATYPE_MEMORY:
1004		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
1005		break;
1006	case IMX_DMATYPE_DSP:
1007		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
1008		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
1009		break;
1010	case IMX_DMATYPE_FIRI:
1011		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
1012		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
1013		break;
1014	case IMX_DMATYPE_UART:
1015		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
1016		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1017		break;
1018	case IMX_DMATYPE_UART_SP:
1019		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
1020		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1021		break;
1022	case IMX_DMATYPE_ATA:
1023		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
1024		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
1025		break;
1026	case IMX_DMATYPE_CSPI:
1027		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
1028
1029		/* Use rom script mcu_2_app if ERR009165 fixed */
1030		if (sdmac->sdma->drvdata->ecspi_fixed) {
1031			emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1032		} else {
1033			emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
1034			sdmac->is_ram_script = true;
1035		}
1036
1037		break;
1038	case IMX_DMATYPE_EXT:
1039	case IMX_DMATYPE_SSI:
1040	case IMX_DMATYPE_SAI:
1041		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
1042		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
1043		break;
1044	case IMX_DMATYPE_SSI_DUAL:
1045		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
1046		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
1047		sdmac->is_ram_script = true;
1048		break;
1049	case IMX_DMATYPE_SSI_SP:
1050	case IMX_DMATYPE_MMC:
1051	case IMX_DMATYPE_SDHC:
1052	case IMX_DMATYPE_CSPI_SP:
1053	case IMX_DMATYPE_ESAI:
1054	case IMX_DMATYPE_MSHC_SP:
1055		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
1056		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1057		break;
1058	case IMX_DMATYPE_ASRC:
1059		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
1060		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
1061		per_2_per = sdma->script_addrs->per_2_per_addr;
1062		sdmac->is_ram_script = true;
1063		break;
1064	case IMX_DMATYPE_ASRC_SP:
1065		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
1066		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
1067		per_2_per = sdma->script_addrs->per_2_per_addr;
1068		break;
1069	case IMX_DMATYPE_MSHC:
1070		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
1071		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
1072		break;
1073	case IMX_DMATYPE_CCM:
1074		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
1075		break;
1076	case IMX_DMATYPE_SPDIF:
1077		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
1078		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
1079		break;
1080	case IMX_DMATYPE_IPU_MEMORY:
1081		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
1082		break;
1083	case IMX_DMATYPE_MULTI_SAI:
1084		per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
1085		emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
1086		break;
1087	case IMX_DMATYPE_I2C:
1088		per_2_emi = sdma->script_addrs->i2c_2_mcu_addr;
1089		emi_2_per = sdma->script_addrs->mcu_2_i2c_addr;
1090		sdmac->is_ram_script = true;
1091		break;
1092	case IMX_DMATYPE_HDMI:
1093		emi_2_per = sdma->script_addrs->hdmi_dma_addr;
1094		sdmac->is_ram_script = true;
1095		break;
1096	default:
1097		dev_err(sdma->dev, "Unsupported transfer type %d\n",
1098			peripheral_type);
1099		return -EINVAL;
1100	}
1101
1102	sdmac->pc_from_device = per_2_emi;
1103	sdmac->pc_to_device = emi_2_per;
1104	sdmac->device_to_device = per_2_per;
1105	sdmac->pc_to_pc = emi_2_emi;
1106
1107	return 0;
1108}
1109
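/*
 * Build the channel context (script PC, event masks, peripheral addresses
 * and watermark level) in the shared context buffer and write it into SDMA
 * context RAM with a C0_SETDM command on channel 0.
 */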
1110static int sdma_load_context(struct sdma_channel *sdmac)
1111{
1112	struct sdma_engine *sdma = sdmac->sdma;
1113	int channel = sdmac->channel;
1114	int load_address;
1115	struct sdma_context_data *context = sdma->context;
1116	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
1117	int ret;
1118	unsigned long flags;
1119
1120	if (sdmac->direction == DMA_DEV_TO_MEM)
1121		load_address = sdmac->pc_from_device;
1122	else if (sdmac->direction == DMA_DEV_TO_DEV)
1123		load_address = sdmac->device_to_device;
1124	else if (sdmac->direction == DMA_MEM_TO_MEM)
1125		load_address = sdmac->pc_to_pc;
1126	else
1127		load_address = sdmac->pc_to_device;
1128
1129	if (load_address < 0)
1130		return load_address;
1131
1132	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
1133	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
1134	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
1135	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
1136	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
1137	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
1138
1139	spin_lock_irqsave(&sdma->channel_0_lock, flags);
1140
1141	memset(context, 0, sizeof(*context));
1142	context->channel_state.pc = load_address;
1143
 1144	/* Pass the event masks, peripheral base addresses and the
 1145	 * watermark level to the script through the context
 1146	 */
1147	if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
1148		context->gReg[4] = sdmac->per_addr;
1149		context->gReg[6] = sdmac->shp_addr;
1150	} else {
1151		context->gReg[0] = sdmac->event_mask[1];
1152		context->gReg[1] = sdmac->event_mask[0];
1153		context->gReg[2] = sdmac->per_addr;
1154		context->gReg[6] = sdmac->shp_addr;
1155		context->gReg[7] = sdmac->watermark_level;
1156	}
1157
1158	bd0->mode.command = C0_SETDM;
1159	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
1160	bd0->mode.count = sizeof(*context) / 4;
1161	bd0->buffer_addr = sdma->context_phys;
1162	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
1163	ret = sdma_run_channel0(sdma);
1164
1165	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
1166
1167	return ret;
1168}
1169
1170static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
1171{
1172	return container_of(chan, struct sdma_channel, vc.chan);
1173}
1174
1175static int sdma_disable_channel(struct dma_chan *chan)
1176{
1177	struct sdma_channel *sdmac = to_sdma_chan(chan);
1178	struct sdma_engine *sdma = sdmac->sdma;
1179	int channel = sdmac->channel;
1180
1181	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
1182	sdmac->status = DMA_ERROR;
1183
1184	return 0;
1185}
1186static void sdma_channel_terminate_work(struct work_struct *work)
1187{
1188	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
1189						  terminate_worker);
 1190	/*
 1191	 * According to the NXP R&D team, a delay of one BD processing time
 1192	 * (at most 1 ms) should be added after clearing the channel enable
 1193	 * bit, to ensure the SDMA core has really stopped after SDMA
 1194	 * clients call .device_terminate_all.
 1195	 */
1196	usleep_range(1000, 2000);
1197
1198	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->terminated);
1199}
1200
1201static int sdma_terminate_all(struct dma_chan *chan)
1202{
1203	struct sdma_channel *sdmac = to_sdma_chan(chan);
1204	unsigned long flags;
1205
1206	spin_lock_irqsave(&sdmac->vc.lock, flags);
1207
1208	sdma_disable_channel(chan);
1209
1210	if (sdmac->desc) {
1211		vchan_terminate_vdesc(&sdmac->desc->vd);
 1212		/*
 1213		 * Move the current descriptor onto the terminated list so
 1214		 * that it can be freed later in sdma_channel_terminate_work
 1215		 * on its own, without touching a new descriptor that may be
 1216		 * issued before the terminated one has been freed.
 1217		 */
1218		vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
1219		sdmac->desc = NULL;
1220		schedule_work(&sdmac->terminate_worker);
1221	}
1222
1223	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1224
1225	return 0;
1226}
1227
1228static void sdma_channel_synchronize(struct dma_chan *chan)
1229{
1230	struct sdma_channel *sdmac = to_sdma_chan(chan);
1231
1232	vchan_synchronize(&sdmac->vc);
1233
1234	flush_work(&sdmac->terminate_worker);
1235}
1236
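/*
 * Build the watermark/INFO word for peripheral-to-peripheral transfers:
 * the low and high watermark levels (LWML/HWML), the event flags for
 * event ids above 31, the SPBA shared-peripheral flags, continuous mode
 * and the dual-FIFO bits are all encoded into sdmac->watermark_level,
 * which sdma_load_context() hands to the script in r7.
 */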
1237static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
1238{
1239	struct sdma_engine *sdma = sdmac->sdma;
1240
1241	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
1242	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
1243
1244	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
1245	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
1246
1247	if (sdmac->event_id0 > 31)
1248		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
1249
1250	if (sdmac->event_id1 > 31)
1251		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
1252
1253	/*
1254	 * If LWML (src_maxburst) > HWML (dst_maxburst), swap the LWML
1255	 * and HWML fields of INFO (A.3.2.5.1) and also swap
1256	 * r0 (event_mask[1]) and r1 (event_mask[0]).
1257	 */
1258	if (lwml > hwml) {
1259		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
1260						SDMA_WATERMARK_LEVEL_HWML);
1261		sdmac->watermark_level |= hwml;
1262		sdmac->watermark_level |= lwml << 16;
1263		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
1264	}
1265
1266	if (sdmac->per_address2 >= sdma->spba_start_addr &&
1267			sdmac->per_address2 <= sdma->spba_end_addr)
1268		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
1269
1270	if (sdmac->per_address >= sdma->spba_start_addr &&
1271			sdmac->per_address <= sdma->spba_end_addr)
1272		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
1273
1274	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
1275
1276	/*
1277	 * Limitation: the p2p script supports dual FIFOs at most, so
1278	 * whenever the FIFO number is larger than 1, force-enable the
1279	 * dual-FIFO mode.
1280	 */
1281	if (sdmac->n_fifos_src > 1)
1282		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SD;
1283	if (sdmac->n_fifos_dst > 1)
1284		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DD;
1285}
1286
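/*
 * For multi-FIFO SAI transfers, encode the FIFO count, the FIFO stride
 * and the words-per-FIFO value (plus the optional software-done flag)
 * into the watermark level using the SDMA_WATERMARK_LEVEL_* fields.
 */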
1287static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
1288{
1289	unsigned int n_fifos;
1290	unsigned int stride_fifos;
1291	unsigned int words_per_fifo;
1292
1293	if (sdmac->sw_done)
1294		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
1295
1296	if (sdmac->direction == DMA_DEV_TO_MEM) {
1297		n_fifos = sdmac->n_fifos_src;
1298		stride_fifos = sdmac->stride_fifos_src;
1299	} else {
1300		n_fifos = sdmac->n_fifos_dst;
1301		stride_fifos = sdmac->stride_fifos_dst;
1302	}
1303
1304	words_per_fifo = sdmac->words_per_fifo;
1305
1306	sdmac->watermark_level |=
1307			FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
1308	sdmac->watermark_level |=
1309			FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
1310	if (words_per_fifo)
1311		sdmac->watermark_level |=
1312			FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
1313}
1314
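/*
 * Program a channel for its peripheral type: stop it, set the
 * host/DSP/event ownership bits, resolve the script load address and
 * prepare the event masks, watermark level and peripheral addresses
 * that are later written into the channel context.
 */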
1315static int sdma_config_channel(struct dma_chan *chan)
1316{
1317	struct sdma_channel *sdmac = to_sdma_chan(chan);
1318	int ret;
1319
1320	sdma_disable_channel(chan);
1321
1322	sdmac->event_mask[0] = 0;
1323	sdmac->event_mask[1] = 0;
1324	sdmac->shp_addr = 0;
1325	sdmac->per_addr = 0;
1326
1327	switch (sdmac->peripheral_type) {
1328	case IMX_DMATYPE_DSP:
1329		sdma_config_ownership(sdmac, false, true, true);
1330		break;
1331	case IMX_DMATYPE_MEMORY:
1332		sdma_config_ownership(sdmac, false, true, false);
1333		break;
1334	default:
1335		sdma_config_ownership(sdmac, true, true, false);
1336		break;
1337	}
1338
1339	ret = sdma_get_pc(sdmac, sdmac->peripheral_type);
1340	if (ret)
1341		return ret;
1342
1343	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1344			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1345		/* Handle multiple event channels differently */
1346		if (sdmac->event_id1) {
1347			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1348			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1349				sdma_set_watermarklevel_for_p2p(sdmac);
1350		} else {
1351			if (sdmac->peripheral_type ==
1352					IMX_DMATYPE_MULTI_SAI)
1353				sdma_set_watermarklevel_for_sais(sdmac);
1354
1355			__set_bit(sdmac->event_id0, sdmac->event_mask);
1356		}
1357
1358		/* Address */
1359		sdmac->shp_addr = sdmac->per_address;
1360		sdmac->per_addr = sdmac->per_address2;
1361	} else {
1362		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
1363	}
1364
1365	return 0;
1366}
1367
1368static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1369				     unsigned int priority)
1370{
1371	struct sdma_engine *sdma = sdmac->sdma;
1372	int channel = sdmac->channel;
1373
1374	if (priority < MXC_SDMA_MIN_PRIORITY
1375	    || priority > MXC_SDMA_MAX_PRIORITY) {
1376		return -EINVAL;
1377	}
1378
1379	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1380
1381	return 0;
1382}
1383
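/*
 * Set up channel 0, the command channel used to load scripts and
 * contexts: allocate its single buffer descriptor (from the IRAM pool
 * if one is available), hook it into the channel control block and
 * give the channel its default priority.
 */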
1384static int sdma_request_channel0(struct sdma_engine *sdma)
1385{
1386	int ret = -EBUSY;
1387
1388	if (sdma->iram_pool)
1389		sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool,
1390					sizeof(struct sdma_buffer_descriptor),
1391					&sdma->bd0_phys);
1392	else
1393		sdma->bd0 = dma_alloc_coherent(sdma->dev,
1394					sizeof(struct sdma_buffer_descriptor),
1395					&sdma->bd0_phys, GFP_NOWAIT);
1396	if (!sdma->bd0) {
1397		ret = -ENOMEM;
1398		goto out;
1399	}
1400
1401	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
1402	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
1403
1404	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
1405	return 0;
1406out:
1407
1408	return ret;
1409}
1410
1411
1412static int sdma_alloc_bd(struct sdma_desc *desc)
1413{
1414	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1415	struct sdma_engine *sdma = desc->sdmac->sdma;
1416	int ret = 0;
1417
1418	if (sdma->iram_pool)
1419		desc->bd = gen_pool_dma_alloc(sdma->iram_pool, bd_size, &desc->bd_phys);
1420	else
1421		desc->bd = dma_alloc_coherent(sdma->dev, bd_size, &desc->bd_phys, GFP_NOWAIT);
1422
1423	if (!desc->bd) {
1424		ret = -ENOMEM;
1425		goto out;
1426	}
1427out:
1428	return ret;
1429}
1430
1431static void sdma_free_bd(struct sdma_desc *desc)
1432{
1433	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1434	struct sdma_engine *sdma = desc->sdmac->sdma;
1435
1436	if (sdma->iram_pool)
1437		gen_pool_free(sdma->iram_pool, (unsigned long)desc->bd, bd_size);
1438	else
1439		dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, desc->bd_phys);
1440}
1441
1442static void sdma_desc_free(struct virt_dma_desc *vd)
1443{
1444	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
1445
1446	sdma_free_bd(desc);
1447	kfree(desc);
1448}
1449
1450static int sdma_alloc_chan_resources(struct dma_chan *chan)
1451{
1452	struct sdma_channel *sdmac = to_sdma_chan(chan);
1453	struct imx_dma_data *data = chan->private;
1454	struct imx_dma_data mem_data;
1455	int prio, ret;
1456
1457	/*
1458	 * MEMCPY users such as dmatest may never set up chan->private via a
1459	 * filter function, so create 'struct imx_dma_data mem_data' for that
1460	 * case. Note that in any other slave case you have to set up
1461	 * chan->private with 'struct imx_dma_data' in your own filter function
1462	 * if you request the dma channel by dma_request_channel() rather than
1463	 * dma_request_slave_channel(). Otherwise, 'MEMCPY in case?' will appear
1464	 * to warn you to correct your filter function.
1465	 */
1466	if (!data) {
1467		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
1468		mem_data.priority = 2;
1469		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
1470		mem_data.dma_request = 0;
1471		mem_data.dma_request2 = 0;
1472		data = &mem_data;
1473
1474		ret = sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
1475		if (ret)
1476			return ret;
1477	}
1478
1479	switch (data->priority) {
1480	case DMA_PRIO_HIGH:
1481		prio = 3;
1482		break;
1483	case DMA_PRIO_MEDIUM:
1484		prio = 2;
1485		break;
1486	case DMA_PRIO_LOW:
1487	default:
1488		prio = 1;
1489		break;
1490	}
1491
1492	sdmac->peripheral_type = data->peripheral_type;
1493	sdmac->event_id0 = data->dma_request;
1494	sdmac->event_id1 = data->dma_request2;
1495
1496	ret = clk_enable(sdmac->sdma->clk_ipg);
1497	if (ret)
1498		return ret;
1499	ret = clk_enable(sdmac->sdma->clk_ahb);
1500	if (ret)
1501		goto disable_clk_ipg;
1502
1503	ret = sdma_set_channel_priority(sdmac, prio);
1504	if (ret)
1505		goto disable_clk_ahb;
1506
1507	return 0;
1508
1509disable_clk_ahb:
1510	clk_disable(sdmac->sdma->clk_ahb);
1511disable_clk_ipg:
1512	clk_disable(sdmac->sdma->clk_ipg);
1513	return ret;
1514}
1515
1516static void sdma_free_chan_resources(struct dma_chan *chan)
1517{
1518	struct sdma_channel *sdmac = to_sdma_chan(chan);
1519	struct sdma_engine *sdma = sdmac->sdma;
1520
1521	sdma_terminate_all(chan);
1522
1523	sdma_channel_synchronize(chan);
1524
1525	sdma_event_disable(sdmac, sdmac->event_id0);
1526	if (sdmac->event_id1)
1527		sdma_event_disable(sdmac, sdmac->event_id1);
1528
1529	sdmac->event_id0 = 0;
1530	sdmac->event_id1 = 0;
1531
1532	sdma_set_channel_priority(sdmac, 0);
1533
1534	clk_disable(sdma->clk_ipg);
1535	clk_disable(sdma->clk_ahb);
1536}
1537
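/*
 * Common descriptor setup for all prep callbacks: refuse transfers that
 * need a RAM script before the firmware is loaded, allocate the
 * descriptor and its buffer descriptors, and load the channel context.
 */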
1538static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
1539				enum dma_transfer_direction direction, u32 bds)
1540{
1541	struct sdma_desc *desc;
1542
1543	if (!sdmac->sdma->fw_loaded && sdmac->is_ram_script) {
1544		dev_warn_once(sdmac->sdma->dev, "sdma firmware not ready!\n");
1545		goto err_out;
1546	}
1547
1548	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
1549	if (!desc)
1550		goto err_out;
1551
1552	sdmac->status = DMA_IN_PROGRESS;
1553	sdmac->direction = direction;
1554	sdmac->flags = 0;
1555
1556	desc->chn_count = 0;
1557	desc->chn_real_count = 0;
1558	desc->buf_tail = 0;
1559	desc->buf_ptail = 0;
1560	desc->sdmac = sdmac;
1561	desc->num_bd = bds;
1562
1563	if (bds && sdma_alloc_bd(desc))
1564		goto err_desc_out;
1565
1566	/* slave_config is never called in the MEMCPY case, so set ownership here */
1567	if (direction == DMA_MEM_TO_MEM)
1568		sdma_config_ownership(sdmac, false, true, false);
1569
1570	if (sdma_load_context(sdmac))
1571		goto err_bd_out;
1572
1573	return desc;
1574
1575err_bd_out:
1576	sdma_free_bd(desc);
1577err_desc_out:
1578	kfree(desc);
1579err_out:
1580	return NULL;
1581}
1582
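/*
 * Memory-to-memory transfers are split into buffer descriptors of at
 * most SDMA_BD_MAX_CNT bytes each; only the last descriptor carries
 * BD_INTR and BD_LAST and drops BD_CONT so the script stops there.
 */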
1583static struct dma_async_tx_descriptor *sdma_prep_memcpy(
1584		struct dma_chan *chan, dma_addr_t dma_dst,
1585		dma_addr_t dma_src, size_t len, unsigned long flags)
1586{
1587	struct sdma_channel *sdmac = to_sdma_chan(chan);
1588	struct sdma_engine *sdma = sdmac->sdma;
1589	int channel = sdmac->channel;
1590	size_t count;
1591	int i = 0, param;
1592	struct sdma_buffer_descriptor *bd;
1593	struct sdma_desc *desc;
1594
1595	if (!chan || !len)
1596		return NULL;
1597
1598	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
1599		&dma_src, &dma_dst, len, channel);
1600
1601	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
1602					len / SDMA_BD_MAX_CNT + 1);
1603	if (!desc)
1604		return NULL;
1605
1606	do {
1607		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
1608		bd = &desc->bd[i];
1609		bd->buffer_addr = dma_src;
1610		bd->ext_buffer_addr = dma_dst;
1611		bd->mode.count = count;
1612		desc->chn_count += count;
1613		bd->mode.command = 0;
1614
1615		dma_src += count;
1616		dma_dst += count;
1617		len -= count;
1618		i++;
1619
1620		param = BD_DONE | BD_EXTD | BD_CONT;
1621		/* last bd */
1622		if (!len) {
1623			param |= BD_INTR;
1624			param |= BD_LAST;
1625			param &= ~BD_CONT;
1626		}
1627
1628		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
1629				i, count, bd->buffer_addr,
1630				param & BD_WRAP ? "wrap" : "",
1631				param & BD_INTR ? " intr" : "");
1632
1633		bd->mode.status = param;
1634	} while (len);
1635
1636	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1637}
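/*
 * Slave scatter-gather: each sg entry maps to one buffer descriptor.
 * mode.command encodes the bus width (0 for 32-bit, otherwise the
 * number of bytes per word), and both length and address must be
 * aligned to that width in the 16-bit and 32-bit cases.
 */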
1638
1639static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1640		struct dma_chan *chan, struct scatterlist *sgl,
1641		unsigned int sg_len, enum dma_transfer_direction direction,
1642		unsigned long flags, void *context)
1643{
1644	struct sdma_channel *sdmac = to_sdma_chan(chan);
1645	struct sdma_engine *sdma = sdmac->sdma;
1646	int i, count;
1647	int channel = sdmac->channel;
1648	struct scatterlist *sg;
1649	struct sdma_desc *desc;
1650
1651	sdma_config_write(chan, &sdmac->slave_config, direction);
1652
1653	desc = sdma_transfer_init(sdmac, direction, sg_len);
1654	if (!desc)
1655		goto err_out;
1656
1657	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1658			sg_len, channel);
1659
1660	for_each_sg(sgl, sg, sg_len, i) {
1661		struct sdma_buffer_descriptor *bd = &desc->bd[i];
1662		int param;
1663
1664		bd->buffer_addr = sg->dma_address;
1665
1666		count = sg_dma_len(sg);
1667
1668		if (count > SDMA_BD_MAX_CNT) {
1669			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1670					channel, count, SDMA_BD_MAX_CNT);
1671			goto err_bd_out;
1672		}
1673
1674		bd->mode.count = count;
1675		desc->chn_count += count;
1676
1677		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1678			goto err_bd_out;
1679
1680		switch (sdmac->word_size) {
1681		case DMA_SLAVE_BUSWIDTH_4_BYTES:
1682			bd->mode.command = 0;
1683			if (count & 3 || sg->dma_address & 3)
1684				goto err_bd_out;
1685			break;
1686		case DMA_SLAVE_BUSWIDTH_3_BYTES:
1687			bd->mode.command = 3;
1688			break;
1689		case DMA_SLAVE_BUSWIDTH_2_BYTES:
1690			bd->mode.command = 2;
1691			if (count & 1 || sg->dma_address & 1)
1692				goto err_bd_out;
1693			break;
1694		case DMA_SLAVE_BUSWIDTH_1_BYTE:
1695			bd->mode.command = 1;
1696			break;
1697		default:
1698			goto err_bd_out;
1699		}
1700
1701		param = BD_DONE | BD_EXTD | BD_CONT;
1702
1703		if (i + 1 == sg_len) {
1704			param |= BD_INTR;
1705			param |= BD_LAST;
1706			param &= ~BD_CONT;
1707		}
1708
1709		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1710				i, count, (u64)sg->dma_address,
1711				param & BD_WRAP ? "wrap" : "",
1712				param & BD_INTR ? " intr" : "");
1713
1714		bd->mode.status = param;
1715	}
1716
1717	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1718err_bd_out:
1719	sdma_free_bd(desc);
1720	kfree(desc);
1721err_out:
1722	sdmac->status = DMA_ERROR;
1723	return NULL;
1724}
1725
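/*
 * Cyclic (audio) transfers: one buffer descriptor per period, all with
 * BD_INTR and BD_CONT set, the last one additionally marked BD_WRAP so
 * the descriptor ring loops forever. HDMI audio uses no buffer
 * descriptors at all.
 */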
1726static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1727		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1728		size_t period_len, enum dma_transfer_direction direction,
1729		unsigned long flags)
1730{
1731	struct sdma_channel *sdmac = to_sdma_chan(chan);
1732	struct sdma_engine *sdma = sdmac->sdma;
1733	int num_periods = 0;
1734	int channel = sdmac->channel;
1735	int i = 0, buf = 0;
1736	struct sdma_desc *desc;
1737
1738	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1739
1740	if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
1741		num_periods = buf_len / period_len;
1742
1743	sdma_config_write(chan, &sdmac->slave_config, direction);
1744
1745	desc = sdma_transfer_init(sdmac, direction, num_periods);
1746	if (!desc)
1747		goto err_out;
1748
1749	desc->period_len = period_len;
1750
1751	sdmac->flags |= IMX_DMA_SG_LOOP;
1752
1753	if (period_len > SDMA_BD_MAX_CNT) {
1754		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1755				channel, period_len, SDMA_BD_MAX_CNT);
1756		goto err_bd_out;
1757	}
1758
1759	if (sdmac->peripheral_type == IMX_DMATYPE_HDMI)
1760		return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1761
1762	while (buf < buf_len) {
1763		struct sdma_buffer_descriptor *bd = &desc->bd[i];
1764		int param;
1765
1766		bd->buffer_addr = dma_addr;
1767
1768		bd->mode.count = period_len;
1769
1770		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1771			goto err_bd_out;
1772		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1773			bd->mode.command = 0;
1774		else
1775			bd->mode.command = sdmac->word_size;
1776
1777		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1778		if (i + 1 == num_periods)
1779			param |= BD_WRAP;
1780
1781		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
1782				i, period_len, (u64)dma_addr,
1783				param & BD_WRAP ? "wrap" : "",
1784				param & BD_INTR ? " intr" : "");
1785
1786		bd->mode.status = param;
1787
1788		dma_addr += period_len;
1789		buf += period_len;
1790
1791		i++;
1792	}
1793
1794	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1795err_bd_out:
1796	sdma_free_bd(desc);
1797	kfree(desc);
1798err_out:
1799	sdmac->status = DMA_ERROR;
1800	return NULL;
1801}
1802
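/*
 * Translate the dma_slave_config for the given direction into the
 * peripheral address(es), watermark level and word size used by the
 * scripts, then reprogram the channel.
 */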
1803static int sdma_config_write(struct dma_chan *chan,
1804		       struct dma_slave_config *dmaengine_cfg,
1805		       enum dma_transfer_direction direction)
1806{
1807	struct sdma_channel *sdmac = to_sdma_chan(chan);
1808
1809	if (direction == DMA_DEV_TO_MEM) {
1810		sdmac->per_address = dmaengine_cfg->src_addr;
1811		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1812			dmaengine_cfg->src_addr_width;
1813		sdmac->word_size = dmaengine_cfg->src_addr_width;
1814	} else if (direction == DMA_DEV_TO_DEV) {
1815		sdmac->per_address2 = dmaengine_cfg->src_addr;
1816		sdmac->per_address = dmaengine_cfg->dst_addr;
1817		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1818			SDMA_WATERMARK_LEVEL_LWML;
1819		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1820			SDMA_WATERMARK_LEVEL_HWML;
1821		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1822	} else if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
1823		sdmac->per_address = dmaengine_cfg->dst_addr;
1824		sdmac->per_address2 = dmaengine_cfg->src_addr;
1825		sdmac->watermark_level = 0;
1826	} else {
1827		sdmac->per_address = dmaengine_cfg->dst_addr;
1828		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1829			dmaengine_cfg->dst_addr_width;
1830		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1831	}
1832	sdmac->direction = direction;
1833	return sdma_config_channel(chan);
1834}
1835
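/*
 * dmaengine .device_config callback: cache the slave config, take over
 * the optional struct sdma_peripheral_config and enable the DMA request
 * event(s), so the hardware trigger is unmasked before the first
 * transfer is prepared.
 */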
1836static int sdma_config(struct dma_chan *chan,
1837		       struct dma_slave_config *dmaengine_cfg)
1838{
1839	struct sdma_channel *sdmac = to_sdma_chan(chan);
1840	struct sdma_engine *sdma = sdmac->sdma;
1841
1842	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
1843
1844	if (dmaengine_cfg->peripheral_config) {
1845		struct sdma_peripheral_config *sdmacfg = dmaengine_cfg->peripheral_config;
1846		if (dmaengine_cfg->peripheral_size != sizeof(struct sdma_peripheral_config)) {
1847			dev_err(sdma->dev, "Invalid peripheral size %zu, expected %zu\n",
1848				dmaengine_cfg->peripheral_size,
1849				sizeof(struct sdma_peripheral_config));
1850			return -EINVAL;
1851		}
1852		sdmac->n_fifos_src = sdmacfg->n_fifos_src;
1853		sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
1854		sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
1855		sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
1856		sdmac->words_per_fifo = sdmacfg->words_per_fifo;
1857		sdmac->sw_done = sdmacfg->sw_done;
1858	}
1859
1860	/* Set ENBLn earlier to make sure the dma request is only triggered after that */
1861	if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
1862		return -EINVAL;
1863	sdma_event_enable(sdmac, sdmac->event_id0);
1864
1865	if (sdmac->event_id1) {
1866		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1867			return -EINVAL;
1868		sdma_event_enable(sdmac, sdmac->event_id1);
1869	}
1870
1871	return 0;
1872}
1873
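/*
 * Residue reporting: for cyclic channels the residue is
 * (num_bd - buf_ptail) * period_len - chn_real_count, for everything
 * else it is chn_count - chn_real_count.
 */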
1874static enum dma_status sdma_tx_status(struct dma_chan *chan,
1875				      dma_cookie_t cookie,
1876				      struct dma_tx_state *txstate)
1877{
1878	struct sdma_channel *sdmac = to_sdma_chan(chan);
1879	struct sdma_desc *desc = NULL;
1880	u32 residue;
1881	struct virt_dma_desc *vd;
1882	enum dma_status ret;
1883	unsigned long flags;
1884
1885	ret = dma_cookie_status(chan, cookie, txstate);
1886	if (ret == DMA_COMPLETE || !txstate)
1887		return ret;
1888
1889	spin_lock_irqsave(&sdmac->vc.lock, flags);
1890
1891	vd = vchan_find_desc(&sdmac->vc, cookie);
1892	if (vd)
1893		desc = to_sdma_desc(&vd->tx);
1894	else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
1895		desc = sdmac->desc;
1896
1897	if (desc) {
1898		if (sdmac->flags & IMX_DMA_SG_LOOP)
1899			residue = (desc->num_bd - desc->buf_ptail) *
1900				desc->period_len - desc->chn_real_count;
1901		else
1902			residue = desc->chn_count - desc->chn_real_count;
1903	} else {
1904		residue = 0;
1905	}
1906
1907	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1908
1909	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1910			 residue);
1911
1912	return sdmac->status;
1913}
1914
1915static void sdma_issue_pending(struct dma_chan *chan)
1916{
1917	struct sdma_channel *sdmac = to_sdma_chan(chan);
1918	unsigned long flags;
1919
1920	spin_lock_irqsave(&sdmac->vc.lock, flags);
1921	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1922		sdma_start_desc(sdmac);
1923	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1924}
1925
1926#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	\
1927(offsetof(struct sdma_script_start_addrs, v1_end) / sizeof(s32))
1928
1929#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 \
1930(offsetof(struct sdma_script_start_addrs, v2_end) / sizeof(s32))
1931
1932#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 \
1933(offsetof(struct sdma_script_start_addrs, v3_end) / sizeof(s32))
1934
1935#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 \
1936(offsetof(struct sdma_script_start_addrs, v4_end) / sizeof(s32))
1937
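/*
 * Merge a script address table (the per-SoC ROM defaults or the table
 * from external firmware) into sdma->script_addrs, only overwriting
 * entries that carry a valid (positive) address.
 */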
1938static void sdma_add_scripts(struct sdma_engine *sdma,
1939			     const struct sdma_script_start_addrs *addr)
1940{
1941	s32 *addr_arr = (s32 *)addr;
1942	s32 *saddr_arr = (s32 *)sdma->script_addrs;
1943	int i;
1944
1945	/* use the default firmware in ROM if missing external firmware */
1946	if (!sdma->script_number)
1947		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1948
1949	if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
1950				  / sizeof(s32)) {
1951		dev_err(sdma->dev,
1952			"SDMA script number %d does not match the firmware.\n",
1953			sdma->script_number);
1954		return;
1955	}
1956
1957	for (i = 0; i < sdma->script_number; i++)
1958		if (addr_arr[i] > 0)
1959			saddr_arr[i] = addr_arr[i];
1960
1961	/*
1962	 * For compatibility with NXP internal legacy kernels before 4.19, which
1963	 * use the uart ram script, and mainline kernels, which use the uart rom
1964	 * script, newer sdma firmware carries both uart ram/rom scripts.
1965	 * Use the rom versions if they are present (V3 or newer).
1966	 */
1967	if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
1968		if (addr->uart_2_mcu_rom_addr)
1969			sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
1970		if (addr->uartsh_2_mcu_rom_addr)
1971			sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
1972	}
1973}
1974
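/*
 * Asynchronous request_firmware callback: validate the firmware header,
 * pick the script table size from the firmware version, upload the RAM
 * code through channel 0 and merge the new script addresses.
 */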
1975static void sdma_load_firmware(const struct firmware *fw, void *context)
1976{
1977	struct sdma_engine *sdma = context;
1978	const struct sdma_firmware_header *header;
1979	const struct sdma_script_start_addrs *addr;
1980	unsigned short *ram_code;
1981
1982	if (!fw) {
1983		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1984		/* In this case we just use the ROM firmware. */
1985		return;
1986	}
1987
1988	if (fw->size < sizeof(*header))
1989		goto err_firmware;
1990
1991	header = (struct sdma_firmware_header *)fw->data;
1992
1993	if (header->magic != SDMA_FIRMWARE_MAGIC)
1994		goto err_firmware;
1995	if (header->ram_code_start + header->ram_code_size > fw->size)
1996		goto err_firmware;
1997	switch (header->version_major) {
1998	case 1:
1999		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
2000		break;
2001	case 2:
2002		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
2003		break;
2004	case 3:
2005		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
2006		break;
2007	case 4:
2008		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
2009		break;
2010	default:
2011		dev_err(sdma->dev, "unknown firmware version\n");
2012		goto err_firmware;
2013	}
2014
2015	addr = (void *)header + header->script_addrs_start;
2016	ram_code = (void *)header + header->ram_code_start;
2017
2018	clk_enable(sdma->clk_ipg);
2019	clk_enable(sdma->clk_ahb);
2020	/* download the RAM image for SDMA */
2021	sdma_load_script(sdma, ram_code,
2022			 header->ram_code_size,
2023			 addr->ram_code_start_addr);
2024	clk_disable(sdma->clk_ipg);
2025	clk_disable(sdma->clk_ahb);
2026
2027	sdma_add_scripts(sdma, addr);
2028
2029	sdma->fw_loaded = true;
2030
2031	dev_info(sdma->dev, "loaded firmware %d.%d\n",
2032		 header->version_major,
2033		 header->version_minor);
2034
2035err_firmware:
2036	release_firmware(fw);
2037}
2038
2039#define EVENT_REMAP_CELLS 3
2040
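/*
 * Apply the optional "fsl,sdma-event-remap" property: each
 * <reg shift val> triplet updates one bit in the GPR syscon to remap a
 * DMA event.
 */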
2041static int sdma_event_remap(struct sdma_engine *sdma)
2042{
2043	struct device_node *np = sdma->dev->of_node;
2044	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
2045	struct property *event_remap;
2046	struct regmap *gpr;
2047	char propname[] = "fsl,sdma-event-remap";
2048	u32 reg, val, shift, num_map, i;
2049	int ret = 0;
2050
2051	if (IS_ERR(np) || !gpr_np)
2052		goto out;
2053
2054	event_remap = of_find_property(np, propname, NULL);
2055	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
2056	if (!num_map) {
2057		dev_dbg(sdma->dev, "no event needs to be remapped\n");
2058		goto out;
2059	} else if (num_map % EVENT_REMAP_CELLS) {
2060		dev_err(sdma->dev, "the length of property %s must be a multiple of %d\n",
2061				propname, EVENT_REMAP_CELLS);
2062		ret = -EINVAL;
2063		goto out;
2064	}
2065
2066	gpr = syscon_node_to_regmap(gpr_np);
2067	if (IS_ERR(gpr)) {
2068		dev_err(sdma->dev, "failed to get gpr regmap\n");
2069		ret = PTR_ERR(gpr);
2070		goto out;
2071	}
2072
2073	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
2074		ret = of_property_read_u32_index(np, propname, i, &reg);
2075		if (ret) {
2076			dev_err(sdma->dev, "failed to read property %s index %d\n",
2077					propname, i);
2078			goto out;
2079		}
2080
2081		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
2082		if (ret) {
2083			dev_err(sdma->dev, "failed to read property %s index %d\n",
2084					propname, i + 1);
2085			goto out;
2086		}
2087
2088		ret = of_property_read_u32_index(np, propname, i + 2, &val);
2089		if (ret) {
2090			dev_err(sdma->dev, "failed to read property %s index %d\n",
2091					propname, i + 2);
2092			goto out;
2093		}
2094
2095		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
2096	}
2097
2098out:
2099	if (gpr_np)
2100		of_node_put(gpr_np);
2101
2102	return ret;
2103}
2104
2105static int sdma_get_firmware(struct sdma_engine *sdma,
2106		const char *fw_name)
2107{
2108	int ret;
2109
2110	ret = firmware_request_nowait_nowarn(THIS_MODULE, fw_name, sdma->dev,
2111					GFP_KERNEL, sdma, sdma_load_firmware);
2112
2113	return ret;
2114}
2115
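/*
 * Bring up the SDMA engine itself: allocate the channel control blocks
 * and the shared context buffer, clear all event-channel mappings and
 * priorities, request channel 0 and program the CH0ADDR, CONFIG and
 * C0PTR registers.
 */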
2116static int sdma_init(struct sdma_engine *sdma)
2117{
2118	int i, ret;
2119	dma_addr_t ccb_phys;
2120	int ccbsize;
2121
2122	ret = clk_enable(sdma->clk_ipg);
2123	if (ret)
2124		return ret;
2125	ret = clk_enable(sdma->clk_ahb);
2126	if (ret)
2127		goto disable_clk_ipg;
2128
2129	if (sdma->drvdata->check_ratio &&
2130	    (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
2131		sdma->clk_ratio = 1;
2132
2133	/* Be sure SDMA has not started yet */
2134	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
2135
2136	ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control)
2137		  + sizeof(struct sdma_context_data));
2138
2139	if (sdma->iram_pool)
2140		sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys);
2141	else
2142		sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize, &ccb_phys,
2143							   GFP_KERNEL);
2144
2145	if (!sdma->channel_control) {
2146		ret = -ENOMEM;
2147		goto err_dma_alloc;
2148	}
2149
2150	sdma->context = (void *)sdma->channel_control +
2151		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
2152	sdma->context_phys = ccb_phys +
2153		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
2154
2155	/* disable all channels */
2156	for (i = 0; i < sdma->drvdata->num_events; i++)
2157		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
2158
2159	/* All channels have priority 0 */
2160	for (i = 0; i < MAX_DMA_CHANNELS; i++)
2161		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
2162
2163	ret = sdma_request_channel0(sdma);
2164	if (ret)
2165		goto err_dma_alloc;
2166
2167	sdma_config_ownership(&sdma->channel[0], false, true, false);
2168
2169	/* Set Command Channel (Channel Zero) */
2170	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
2171
2172	/* Set bits of CONFIG register but with static context switching */
2173	if (sdma->clk_ratio)
2174		writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
2175	else
2176		writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
2177
2178	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
2179
2180	/* Set channel 0's priority */
2181	sdma_set_channel_priority(&sdma->channel[0], 7);
2182
2183	clk_disable(sdma->clk_ipg);
2184	clk_disable(sdma->clk_ahb);
2185
2186	return 0;
2187
2188err_dma_alloc:
2189	clk_disable(sdma->clk_ahb);
2190disable_clk_ipg:
2191	clk_disable(sdma->clk_ipg);
2192	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
2193	return ret;
2194}
2195
2196static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
2197{
2198	struct sdma_channel *sdmac = to_sdma_chan(chan);
2199	struct imx_dma_data *data = fn_param;
2200
2201	if (!imx_dma_is_general_purpose(chan))
2202		return false;
2203
2204	sdmac->data = *data;
2205	chan->private = &sdmac->data;
2206
2207	return true;
2208}
2209
2210static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
2211				   struct of_dma *ofdma)
2212{
2213	struct sdma_engine *sdma = ofdma->of_dma_data;
2214	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
2215	struct imx_dma_data data;
2216
2217	if (dma_spec->args_count != 3)
2218		return NULL;
2219
2220	data.dma_request = dma_spec->args[0];
2221	data.peripheral_type = dma_spec->args[1];
2222	data.priority = dma_spec->args[2];
2223	/*
2224	 * Initialize dma_request2 to zero; it is not used by the dts.
2225	 * For P2P, dma_request2 is initialized via dma_request_channel():
2226	 * chan->private will point to the imx_dma_data, and in
2227	 * device_alloc_chan_resources() imx_dma_data.dma_request2 will
2228	 * be copied to sdmac->event_id1.
2229	 */
2230	data.dma_request2 = 0;
2231
2232	return __dma_request_channel(&mask, sdma_filter_fn, &data,
2233				     ofdma->of_node);
2234}
2235
2236static int sdma_probe(struct platform_device *pdev)
2237{
2238	struct device_node *np = pdev->dev.of_node;
2239	struct device_node *spba_bus;
2240	const char *fw_name;
2241	int ret;
2242	int irq;
2243	struct resource spba_res;
2244	int i;
2245	struct sdma_engine *sdma;
2246	s32 *saddr_arr;
2247
2248	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2249	if (ret)
2250		return ret;
2251
2252	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
2253	if (!sdma)
2254		return -ENOMEM;
2255
2256	spin_lock_init(&sdma->channel_0_lock);
2257
2258	sdma->dev = &pdev->dev;
2259	sdma->drvdata = of_device_get_match_data(sdma->dev);
2260
2261	irq = platform_get_irq(pdev, 0);
2262	if (irq < 0)
2263		return irq;
2264
2265	sdma->regs = devm_platform_ioremap_resource(pdev, 0);
2266	if (IS_ERR(sdma->regs))
2267		return PTR_ERR(sdma->regs);
2268
2269	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2270	if (IS_ERR(sdma->clk_ipg))
2271		return PTR_ERR(sdma->clk_ipg);
2272
2273	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2274	if (IS_ERR(sdma->clk_ahb))
2275		return PTR_ERR(sdma->clk_ahb);
2276
2277	ret = clk_prepare(sdma->clk_ipg);
2278	if (ret)
2279		return ret;
2280
2281	ret = clk_prepare(sdma->clk_ahb);
2282	if (ret)
2283		goto err_clk;
2284
2285	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
2286				dev_name(&pdev->dev), sdma);
2287	if (ret)
2288		goto err_irq;
2289
2290	sdma->irq = irq;
2291
2292	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
2293	if (!sdma->script_addrs) {
2294		ret = -ENOMEM;
2295		goto err_irq;
2296	}
2297
2298	/* initially no scripts available */
2299	saddr_arr = (s32 *)sdma->script_addrs;
2300	for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
2301		saddr_arr[i] = -EINVAL;
2302
2303	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
2304	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
2305	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
2306	dma_cap_set(DMA_PRIVATE, sdma->dma_device.cap_mask);
2307
2308	INIT_LIST_HEAD(&sdma->dma_device.channels);
2309	/* Initialize channel parameters */
2310	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2311		struct sdma_channel *sdmac = &sdma->channel[i];
2312
2313		sdmac->sdma = sdma;
2314
2315		sdmac->channel = i;
2316		sdmac->vc.desc_free = sdma_desc_free;
2317		INIT_LIST_HEAD(&sdmac->terminated);
2318		INIT_WORK(&sdmac->terminate_worker,
2319				sdma_channel_terminate_work);
2320		/*
2321		 * Add the channel to the DMAC list. Do not add channel 0 though
2322		 * because we need it internally in the SDMA driver. This also means
2323		 * that channel 0 in dmaengine counting matches sdma channel 1.
2324		 */
2325		if (i)
2326			vchan_init(&sdmac->vc, &sdma->dma_device);
2327	}
2328
2329	if (np) {
2330		sdma->iram_pool = of_gen_pool_get(np, "iram", 0);
2331		if (sdma->iram_pool)
2332			dev_info(&pdev->dev, "alloc bd from iram.\n");
2333	}
2334
2335	ret = sdma_init(sdma);
2336	if (ret)
2337		goto err_init;
2338
2339	ret = sdma_event_remap(sdma);
2340	if (ret)
2341		goto err_init;
2342
2343	if (sdma->drvdata->script_addrs)
2344		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
2345
2346	sdma->dma_device.dev = &pdev->dev;
2347
2348	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
2349	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
2350	sdma->dma_device.device_tx_status = sdma_tx_status;
2351	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
2352	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
2353	sdma->dma_device.device_config = sdma_config;
2354	sdma->dma_device.device_terminate_all = sdma_terminate_all;
2355	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
2356	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
2357	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
2358	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
2359	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2360	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
2361	sdma->dma_device.device_issue_pending = sdma_issue_pending;
2362	sdma->dma_device.copy_align = 2;
2363	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
2364
2365	platform_set_drvdata(pdev, sdma);
2366
2367	ret = dma_async_device_register(&sdma->dma_device);
2368	if (ret) {
2369		dev_err(&pdev->dev, "unable to register\n");
2370		goto err_init;
2371	}
2372
2373	if (np) {
2374		ret = of_dma_controller_register(np, sdma_xlate, sdma);
2375		if (ret) {
2376			dev_err(&pdev->dev, "failed to register controller\n");
2377			goto err_register;
2378		}
2379
2380		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
2381		ret = of_address_to_resource(spba_bus, 0, &spba_res);
2382		if (!ret) {
2383			sdma->spba_start_addr = spba_res.start;
2384			sdma->spba_end_addr = spba_res.end;
2385		}
2386		of_node_put(spba_bus);
2387	}
2388
2389	/*
2390	 * Because the device tree does not encode the ROM script addresses,
2391	 * the RAM script in the firmware is mandatory for a device tree
2392	 * probe; otherwise it fails.
2393	 */
2394	ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2395				      &fw_name);
2396	if (ret) {
2397		dev_warn(&pdev->dev, "failed to get firmware name\n");
2398	} else {
2399		ret = sdma_get_firmware(sdma, fw_name);
2400		if (ret)
2401			dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2402	}
2403
2404	return 0;
2405
2406err_register:
2407	dma_async_device_unregister(&sdma->dma_device);
2408err_init:
2409	kfree(sdma->script_addrs);
2410err_irq:
2411	clk_unprepare(sdma->clk_ahb);
2412err_clk:
2413	clk_unprepare(sdma->clk_ipg);
2414	return ret;
2415}
2416
2417static void sdma_remove(struct platform_device *pdev)
2418{
2419	struct sdma_engine *sdma = platform_get_drvdata(pdev);
2420	int i;
2421
2422	devm_free_irq(&pdev->dev, sdma->irq, sdma);
2423	dma_async_device_unregister(&sdma->dma_device);
2424	kfree(sdma->script_addrs);
2425	clk_unprepare(sdma->clk_ahb);
2426	clk_unprepare(sdma->clk_ipg);
2427	/* Kill the tasklet */
2428	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2429		struct sdma_channel *sdmac = &sdma->channel[i];
2430
2431		tasklet_kill(&sdmac->vc.task);
2432		sdma_free_chan_resources(&sdmac->vc.chan);
2433	}
2434
2435	platform_set_drvdata(pdev, NULL);
2436}
2437
2438static struct platform_driver sdma_driver = {
2439	.driver		= {
2440		.name	= "imx-sdma",
2441		.of_match_table = sdma_dt_ids,
2442	},
2443	.remove		= sdma_remove,
2444	.probe		= sdma_probe,
2445};
2446
2447module_platform_driver(sdma_driver);
2448
2449MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
2450MODULE_DESCRIPTION("i.MX SDMA driver");
2451#if IS_ENABLED(CONFIG_SOC_IMX6Q)
2452MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
2453#endif
2454#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
2455MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
2456#endif
2457MODULE_LICENSE("GPL");