   1// SPDX-License-Identifier: GPL-2.0+
   2//
   3// drivers/dma/imx-sdma.c
   4//
   5// This file contains a driver for the Freescale Smart DMA engine
   6//
   7// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
   8//
   9// Based on code from Freescale:
  10//
  11// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
  12
  13#include <linux/init.h>
  14#include <linux/iopoll.h>
  15#include <linux/module.h>
  16#include <linux/types.h>
  17#include <linux/bitops.h>
  18#include <linux/mm.h>
  19#include <linux/interrupt.h>
  20#include <linux/clk.h>
  21#include <linux/delay.h>
  22#include <linux/sched.h>
  23#include <linux/semaphore.h>
  24#include <linux/spinlock.h>
  25#include <linux/device.h>
  26#include <linux/dma-mapping.h>
  27#include <linux/firmware.h>
  28#include <linux/slab.h>
  29#include <linux/platform_device.h>
  30#include <linux/dmaengine.h>
  31#include <linux/of.h>
  32#include <linux/of_address.h>
  33#include <linux/of_device.h>
  34#include <linux/of_dma.h>
  35#include <linux/workqueue.h>
  36
  37#include <asm/irq.h>
  38#include <linux/platform_data/dma-imx-sdma.h>
  39#include <linux/platform_data/dma-imx.h>
  40#include <linux/regmap.h>
  41#include <linux/mfd/syscon.h>
  42#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  43
  44#include "dmaengine.h"
  45#include "virt-dma.h"
  46
  47/* SDMA registers */
  48#define SDMA_H_C0PTR		0x000
  49#define SDMA_H_INTR		0x004
  50#define SDMA_H_STATSTOP		0x008
  51#define SDMA_H_START		0x00c
  52#define SDMA_H_EVTOVR		0x010
  53#define SDMA_H_DSPOVR		0x014
  54#define SDMA_H_HOSTOVR		0x018
  55#define SDMA_H_EVTPEND		0x01c
  56#define SDMA_H_DSPENBL		0x020
  57#define SDMA_H_RESET		0x024
  58#define SDMA_H_EVTERR		0x028
  59#define SDMA_H_INTRMSK		0x02c
  60#define SDMA_H_PSW		0x030
  61#define SDMA_H_EVTERRDBG	0x034
  62#define SDMA_H_CONFIG		0x038
  63#define SDMA_ONCE_ENB		0x040
  64#define SDMA_ONCE_DATA		0x044
  65#define SDMA_ONCE_INSTR		0x048
  66#define SDMA_ONCE_STAT		0x04c
  67#define SDMA_ONCE_CMD		0x050
  68#define SDMA_EVT_MIRROR		0x054
  69#define SDMA_ILLINSTADDR	0x058
  70#define SDMA_CHN0ADDR		0x05c
  71#define SDMA_ONCE_RTB		0x060
  72#define SDMA_XTRIG_CONF1	0x070
  73#define SDMA_XTRIG_CONF2	0x074
  74#define SDMA_CHNENBL0_IMX35	0x200
  75#define SDMA_CHNENBL0_IMX31	0x080
  76#define SDMA_CHNPRI_0		0x100
  77
  78/*
  79 * Buffer descriptor status values.
  80 */
  81#define BD_DONE  0x01
  82#define BD_WRAP  0x02
  83#define BD_CONT  0x04
  84#define BD_INTR  0x08
  85#define BD_RROR  0x10
  86#define BD_LAST  0x20
  87#define BD_EXTD  0x80
  88
  89/*
  90 * Data Node descriptor status values.
  91 */
  92#define DND_END_OF_FRAME  0x80
  93#define DND_END_OF_XFER   0x40
  94#define DND_DONE          0x20
  95#define DND_UNUSED        0x01
  96
  97/*
  98 * IPCV2 descriptor status values.
  99 */
 100#define BD_IPCV2_END_OF_FRAME  0x40
 101
 102#define IPCV2_MAX_NODES        50
 103/*
 104 * Error bit set in the CCB status field by the SDMA,
 105 * in setbd routine, in case of a transfer error
 106 */
 107#define DATA_ERROR  0x10000000
 108
 109/*
 110 * Buffer descriptor commands.
 111 */
 112#define C0_ADDR             0x01
 113#define C0_LOAD             0x02
 114#define C0_DUMP             0x03
 115#define C0_SETCTX           0x07
 116#define C0_GETCTX           0x03
 117#define C0_SETDM            0x01
 118#define C0_SETPM            0x04
 119#define C0_GETDM            0x02
 120#define C0_GETPM            0x08
 121/*
 122 * Change endianness indicator in the BD command field
 123 */
 124#define CHANGE_ENDIANNESS   0x80
 125
 126/*
 127 *  p_2_p watermark_level description
 128 *	Bits		Name			Description
 129 *	0-7		Lower WML		Lower watermark level
 130 *	8		PS			1: Pad Swallowing
 131 *						0: No Pad Swallowing
 132 *	9		PA			1: Pad Adding
 133 *						0: No Pad Adding
 134 *	10		SPDIF			If this bit is set both source
 135 *						and destination are on SPBA
 136 *	11		Source Bit(SP)		1: Source on SPBA
 137 *						0: Source on AIPS
 138 *	12		Destination Bit(DP)	1: Destination on SPBA
 139 *						0: Destination on AIPS
 140 *	13-15		---------		MUST BE 0
 141 *	16-23		Higher WML		HWML
 142 *	24-27		N			Total number of samples after
 143 *						which Pad adding/Swallowing
 144 *						must be done. It must be odd.
 145 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 146 *						LWML event mask
 147 *						0: LWE in EVENTS register
 148 *						1: LWE in EVENTS2 register
 149 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 150 *						HWML event mask
 151 *						0: HWE in EVENTS register
 152 *						1: HWE in EVENTS2 register
 153 *	30		---------		MUST BE 0
 154 *	31		CONT			1: Amount of samples to be
 155 *						transferred is unknown and
 156 *						script will keep on
 157 *						transferring samples as long as
 158 *						both events are detected and
 159 *						script must be manually stopped
 160 *						by the application
 161 *						0: The amount of samples to be
 162 *						transferred is equal to the
 163 *						count field of mode word
 164 */
 165#define SDMA_WATERMARK_LEVEL_LWML	0xFF
 166#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
 167#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
 168#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
 169#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
 170#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
 171#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
 172#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
 173#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
 174#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
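/*
 * Illustrative sketch, not part of the original driver: one way the p_2_p
 * watermark word described above could be assembled from the
 * SDMA_WATERMARK_LEVEL_* bits. The helper name and the example burst values
 * are made up for illustration only.
 */
static inline u32 sdma_example_p2p_watermark(u32 lwml, u32 hwml)
{
	u32 wml;

	wml  = lwml & SDMA_WATERMARK_LEVEL_LWML;	  /* bits 0-7: lower WML */
	wml |= (hwml << 16) & SDMA_WATERMARK_LEVEL_HWML;  /* bits 16-23: higher WML */
	wml |= SDMA_WATERMARK_LEVEL_CONT;		  /* bit 31: continuous transfer */

	return wml;	/* e.g. lwml = 4, hwml = 8 gives 0x80080004 */
}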
 175
 176#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 177				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 178				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
 179
 180#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
 181				 BIT(DMA_MEM_TO_DEV) | \
 182				 BIT(DMA_DEV_TO_DEV))
 183
 184/*
 185 * Mode/Count of data node descriptors - IPCv2
 186 */
 187struct sdma_mode_count {
 188#define SDMA_BD_MAX_CNT	0xffff
 189	u32 count   : 16; /* size of the buffer pointed by this BD */
 190	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
 191	u32 command :  8; /* command mostly used for channel 0 */
 192};
 193
 194/*
 195 * Buffer descriptor
 196 */
 197struct sdma_buffer_descriptor {
 198	struct sdma_mode_count  mode;
 199	u32 buffer_addr;	/* address of the buffer described */
 200	u32 ext_buffer_addr;	/* extended buffer address */
 201} __attribute__ ((packed));
 202
 203/**
 204 * struct sdma_channel_control - Channel control Block
 205 *
 206 * @current_bd_ptr:	current buffer descriptor processed
 207 * @base_bd_ptr:	first element of buffer descriptor array
  208 * @unused:		padding. The SDMA engine expects an array of 128-bit
 209 *			control blocks
 210 */
 211struct sdma_channel_control {
 212	u32 current_bd_ptr;
 213	u32 base_bd_ptr;
 214	u32 unused[2];
 215} __attribute__ ((packed));
 216
 217/**
 218 * struct sdma_state_registers - SDMA context for a channel
 219 *
 220 * @pc:		program counter
 221 * @unused1:	unused
 222 * @t:		test bit: status of arithmetic & test instruction
 223 * @rpc:	return program counter
 224 * @unused0:	unused
 225 * @sf:		source fault while loading data
 226 * @spc:	loop start program counter
 227 * @unused2:	unused
 228 * @df:		destination fault while storing data
 229 * @epc:	loop end program counter
 230 * @lm:		loop mode
 231 */
 232struct sdma_state_registers {
 233	u32 pc     :14;
 234	u32 unused1: 1;
 235	u32 t      : 1;
 236	u32 rpc    :14;
 237	u32 unused0: 1;
 238	u32 sf     : 1;
 239	u32 spc    :14;
 240	u32 unused2: 1;
 241	u32 df     : 1;
 242	u32 epc    :14;
 243	u32 lm     : 2;
 244} __attribute__ ((packed));
 245
 246/**
 247 * struct sdma_context_data - sdma context specific to a channel
 248 *
 249 * @channel_state:	channel state bits
 250 * @gReg:		general registers
 251 * @mda:		burst dma destination address register
 252 * @msa:		burst dma source address register
 253 * @ms:			burst dma status register
 254 * @md:			burst dma data register
 255 * @pda:		peripheral dma destination address register
 256 * @psa:		peripheral dma source address register
 257 * @ps:			peripheral dma status register
 258 * @pd:			peripheral dma data register
 259 * @ca:			CRC polynomial register
 260 * @cs:			CRC accumulator register
 261 * @dda:		dedicated core destination address register
 262 * @dsa:		dedicated core source address register
 263 * @ds:			dedicated core status register
 264 * @dd:			dedicated core data register
 265 * @scratch0:		1st word of dedicated ram for context switch
 266 * @scratch1:		2nd word of dedicated ram for context switch
 267 * @scratch2:		3rd word of dedicated ram for context switch
 268 * @scratch3:		4th word of dedicated ram for context switch
 269 * @scratch4:		5th word of dedicated ram for context switch
 270 * @scratch5:		6th word of dedicated ram for context switch
 271 * @scratch6:		7th word of dedicated ram for context switch
 272 * @scratch7:		8th word of dedicated ram for context switch
 273 */
 274struct sdma_context_data {
 275	struct sdma_state_registers  channel_state;
 276	u32  gReg[8];
 277	u32  mda;
 278	u32  msa;
 279	u32  ms;
 280	u32  md;
 281	u32  pda;
 282	u32  psa;
 283	u32  ps;
 284	u32  pd;
 285	u32  ca;
 286	u32  cs;
 287	u32  dda;
 288	u32  dsa;
 289	u32  ds;
 290	u32  dd;
 291	u32  scratch0;
 292	u32  scratch1;
 293	u32  scratch2;
 294	u32  scratch3;
 295	u32  scratch4;
 296	u32  scratch5;
 297	u32  scratch6;
 298	u32  scratch7;
 299} __attribute__ ((packed));
 300
 301
 302struct sdma_engine;
 303
 304/**
  305 * struct sdma_desc - descriptor structure for one transfer
  306 * @vd:			descriptor for virt dma
  307 * @num_bd:		number of buffer descriptors in this transfer
  308 * @bd_phys:		physical address of bd
  309 * @buf_tail:		ID of the buffer that was processed
  310 * @buf_ptail:		ID of the previous buffer that was processed
  311 * @period_len:		period length, used in cyclic mode
  312 * @chn_real_count:	the real count updated from bd->mode.count
  313 * @chn_count:		the transfer count set
  314 * @sdmac:		sdma_channel pointer
  315 * @bd:			pointer to the allocated buffer descriptors
 316 */
 317struct sdma_desc {
 318	struct virt_dma_desc	vd;
 319	unsigned int		num_bd;
 320	dma_addr_t		bd_phys;
 321	unsigned int		buf_tail;
 322	unsigned int		buf_ptail;
 323	unsigned int		period_len;
 324	unsigned int		chn_real_count;
 325	unsigned int		chn_count;
 326	struct sdma_channel	*sdmac;
 327	struct sdma_buffer_descriptor *bd;
 328};
 329
 330/**
 331 * struct sdma_channel - housekeeping for a SDMA channel
 332 *
 333 * @vc:			virt_dma base structure
  334 * @desc:		sdma descriptor including vd and other special members
 335 * @sdma:		pointer to the SDMA engine for this channel
 336 * @channel:		the channel number, matches dmaengine chan_id + 1
 337 * @direction:		transfer type. Needed for setting SDMA script
  338 * @slave_config:	Slave configuration
 339 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
 340 * @event_id0:		aka dma request line
 341 * @event_id1:		for channels that use 2 events
 342 * @word_size:		peripheral access size
  343 * @pc_from_device:	script address for device_2_memory transfers
  344 * @pc_to_device:	script address for memory_2_device transfers
  345 * @device_to_device:	script address for device_2_device transfers
  346 * @pc_to_pc:		script address for memory_2_memory transfers
  347 * @flags:		loop mode or not
  348 * @per_address:	peripheral source or destination address in the common
  349 *			case; destination address in the p_2_p case
  350 * @per_address2:	peripheral source address in the p_2_p case
  351 * @event_mask:		event mask used in the p_2_p script
  352 * @watermark_level:	value for gReg[7]; some scripts extend it beyond the
  353 *			basic watermark, e.g. in the p_2_p case
 354 * @shp_addr:		value for gReg[6]
 355 * @per_addr:		value for gReg[2]
 356 * @status:		status of dma channel
 357 * @data:		specific sdma interface structure
 358 * @bd_pool:		dma_pool for bd
 359 */
 360struct sdma_channel {
 361	struct virt_dma_chan		vc;
 362	struct sdma_desc		*desc;
 363	struct sdma_engine		*sdma;
 364	unsigned int			channel;
 365	enum dma_transfer_direction		direction;
 366	struct dma_slave_config		slave_config;
 367	enum sdma_peripheral_type	peripheral_type;
 368	unsigned int			event_id0;
 369	unsigned int			event_id1;
 370	enum dma_slave_buswidth		word_size;
 371	unsigned int			pc_from_device, pc_to_device;
 372	unsigned int			device_to_device;
 373	unsigned int                    pc_to_pc;
 374	unsigned long			flags;
 375	dma_addr_t			per_address, per_address2;
 376	unsigned long			event_mask[2];
 377	unsigned long			watermark_level;
 378	u32				shp_addr, per_addr;
 379	enum dma_status			status;
 380	bool				context_loaded;
 381	struct imx_dma_data		data;
 382	struct work_struct		terminate_worker;
 383};
 384
 385#define IMX_DMA_SG_LOOP		BIT(0)
 386
 387#define MAX_DMA_CHANNELS 32
 388#define MXC_SDMA_DEFAULT_PRIORITY 1
 389#define MXC_SDMA_MIN_PRIORITY 1
 390#define MXC_SDMA_MAX_PRIORITY 7
 391
 392#define SDMA_FIRMWARE_MAGIC 0x414d4453
 393
 394/**
 395 * struct sdma_firmware_header - Layout of the firmware image
 396 *
 397 * @magic:		"SDMA"
 398 * @version_major:	increased whenever layout of struct
 399 *			sdma_script_start_addrs changes.
 400 * @version_minor:	firmware minor version (for binary compatible changes)
 401 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 402 * @num_script_addrs:	Number of script addresses in this image
 403 * @ram_code_start:	offset of SDMA ram image in this firmware image
 404 * @ram_code_size:	size of SDMA ram image
 405 * @script_addrs:	Stores the start address of the SDMA scripts
 406 *			(in SDMA memory space)
 407 */
 408struct sdma_firmware_header {
 409	u32	magic;
 410	u32	version_major;
 411	u32	version_minor;
 412	u32	script_addrs_start;
 413	u32	num_script_addrs;
 414	u32	ram_code_start;
 415	u32	ram_code_size;
 416};
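/*
 * Illustrative sketch, not part of the original driver: how the offsets in
 * the header above locate the pieces of a firmware image. This mirrors what
 * sdma_load_firmware() does further down; the helper names are made up for
 * illustration only.
 */
static inline const struct sdma_script_start_addrs *
sdma_example_fw_script_addrs(const struct sdma_firmware_header *header)
{
	/* the script address table starts @script_addrs_start bytes into the image */
	return (const void *)header + header->script_addrs_start;
}

static inline const unsigned short *
sdma_example_fw_ram_code(const struct sdma_firmware_header *header)
{
	/* the SDMA RAM code starts @ram_code_start bytes into the image */
	return (const void *)header + header->ram_code_start;
}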
 417
 418struct sdma_driver_data {
 419	int chnenbl0;
 420	int num_events;
 421	struct sdma_script_start_addrs	*script_addrs;
 422	bool check_ratio;
 423};
 424
 425struct sdma_engine {
 426	struct device			*dev;
 427	struct device_dma_parameters	dma_parms;
 428	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 429	struct sdma_channel_control	*channel_control;
 430	void __iomem			*regs;
 431	struct sdma_context_data	*context;
 432	dma_addr_t			context_phys;
 433	struct dma_device		dma_device;
 434	struct clk			*clk_ipg;
 435	struct clk			*clk_ahb;
 436	spinlock_t			channel_0_lock;
 437	u32				script_number;
 438	struct sdma_script_start_addrs	*script_addrs;
 439	const struct sdma_driver_data	*drvdata;
 440	u32				spba_start_addr;
 441	u32				spba_end_addr;
 442	unsigned int			irq;
 443	dma_addr_t			bd0_phys;
 444	struct sdma_buffer_descriptor	*bd0;
  445	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
 446	bool				clk_ratio;
 447};
 448
 449static int sdma_config_write(struct dma_chan *chan,
 450		       struct dma_slave_config *dmaengine_cfg,
 451		       enum dma_transfer_direction direction);
 452
 453static struct sdma_driver_data sdma_imx31 = {
 454	.chnenbl0 = SDMA_CHNENBL0_IMX31,
 455	.num_events = 32,
 456};
 457
 458static struct sdma_script_start_addrs sdma_script_imx25 = {
 459	.ap_2_ap_addr = 729,
 460	.uart_2_mcu_addr = 904,
 461	.per_2_app_addr = 1255,
 462	.mcu_2_app_addr = 834,
 463	.uartsh_2_mcu_addr = 1120,
 464	.per_2_shp_addr = 1329,
 465	.mcu_2_shp_addr = 1048,
 466	.ata_2_mcu_addr = 1560,
 467	.mcu_2_ata_addr = 1479,
 468	.app_2_per_addr = 1189,
 469	.app_2_mcu_addr = 770,
 470	.shp_2_per_addr = 1407,
 471	.shp_2_mcu_addr = 979,
 472};
 473
 474static struct sdma_driver_data sdma_imx25 = {
 475	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 476	.num_events = 48,
 477	.script_addrs = &sdma_script_imx25,
 478};
 479
 480static struct sdma_driver_data sdma_imx35 = {
 481	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 482	.num_events = 48,
 483};
 484
 485static struct sdma_script_start_addrs sdma_script_imx51 = {
 486	.ap_2_ap_addr = 642,
 487	.uart_2_mcu_addr = 817,
 488	.mcu_2_app_addr = 747,
 489	.mcu_2_shp_addr = 961,
 490	.ata_2_mcu_addr = 1473,
 491	.mcu_2_ata_addr = 1392,
 492	.app_2_per_addr = 1033,
 493	.app_2_mcu_addr = 683,
 494	.shp_2_per_addr = 1251,
 495	.shp_2_mcu_addr = 892,
 496};
 497
 498static struct sdma_driver_data sdma_imx51 = {
 499	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 500	.num_events = 48,
 501	.script_addrs = &sdma_script_imx51,
 502};
 503
 504static struct sdma_script_start_addrs sdma_script_imx53 = {
 505	.ap_2_ap_addr = 642,
 506	.app_2_mcu_addr = 683,
 507	.mcu_2_app_addr = 747,
 508	.uart_2_mcu_addr = 817,
 509	.shp_2_mcu_addr = 891,
 510	.mcu_2_shp_addr = 960,
 511	.uartsh_2_mcu_addr = 1032,
 512	.spdif_2_mcu_addr = 1100,
 513	.mcu_2_spdif_addr = 1134,
 514	.firi_2_mcu_addr = 1193,
 515	.mcu_2_firi_addr = 1290,
 516};
 517
 518static struct sdma_driver_data sdma_imx53 = {
 519	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 520	.num_events = 48,
 521	.script_addrs = &sdma_script_imx53,
 522};
 523
 524static struct sdma_script_start_addrs sdma_script_imx6q = {
 525	.ap_2_ap_addr = 642,
 526	.uart_2_mcu_addr = 817,
 527	.mcu_2_app_addr = 747,
 528	.per_2_per_addr = 6331,
 529	.uartsh_2_mcu_addr = 1032,
 530	.mcu_2_shp_addr = 960,
 531	.app_2_mcu_addr = 683,
 532	.shp_2_mcu_addr = 891,
 533	.spdif_2_mcu_addr = 1100,
 534	.mcu_2_spdif_addr = 1134,
 535};
 536
 537static struct sdma_driver_data sdma_imx6q = {
 538	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 539	.num_events = 48,
 540	.script_addrs = &sdma_script_imx6q,
 541};
 542
 543static struct sdma_script_start_addrs sdma_script_imx7d = {
 544	.ap_2_ap_addr = 644,
 545	.uart_2_mcu_addr = 819,
 546	.mcu_2_app_addr = 749,
 547	.uartsh_2_mcu_addr = 1034,
 548	.mcu_2_shp_addr = 962,
 549	.app_2_mcu_addr = 685,
 550	.shp_2_mcu_addr = 893,
 551	.spdif_2_mcu_addr = 1102,
 552	.mcu_2_spdif_addr = 1136,
 553};
 554
 555static struct sdma_driver_data sdma_imx7d = {
 556	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 557	.num_events = 48,
 558	.script_addrs = &sdma_script_imx7d,
 559};
 560
 561static struct sdma_driver_data sdma_imx8mq = {
 562	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 563	.num_events = 48,
 564	.script_addrs = &sdma_script_imx7d,
 565	.check_ratio = 1,
 566};
 567
 568static const struct platform_device_id sdma_devtypes[] = {
 569	{
 570		.name = "imx25-sdma",
 571		.driver_data = (unsigned long)&sdma_imx25,
 572	}, {
 573		.name = "imx31-sdma",
 574		.driver_data = (unsigned long)&sdma_imx31,
 575	}, {
 576		.name = "imx35-sdma",
 577		.driver_data = (unsigned long)&sdma_imx35,
 578	}, {
 579		.name = "imx51-sdma",
 580		.driver_data = (unsigned long)&sdma_imx51,
 581	}, {
 582		.name = "imx53-sdma",
 583		.driver_data = (unsigned long)&sdma_imx53,
 584	}, {
 585		.name = "imx6q-sdma",
 586		.driver_data = (unsigned long)&sdma_imx6q,
 587	}, {
 588		.name = "imx7d-sdma",
 589		.driver_data = (unsigned long)&sdma_imx7d,
 590	}, {
 591		.name = "imx8mq-sdma",
 592		.driver_data = (unsigned long)&sdma_imx8mq,
 593	}, {
 594		/* sentinel */
 595	}
 596};
 597MODULE_DEVICE_TABLE(platform, sdma_devtypes);
 598
 599static const struct of_device_id sdma_dt_ids[] = {
 600	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
 601	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
 602	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
 603	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
 604	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 605	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 606	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
 607	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
 608	{ /* sentinel */ }
 609};
 610MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 611
 612#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
 613#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
 614#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
  615#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected */
 616
 617static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 618{
 619	u32 chnenbl0 = sdma->drvdata->chnenbl0;
 620	return chnenbl0 + event * 4;
 621}
 622
 623static int sdma_config_ownership(struct sdma_channel *sdmac,
 624		bool event_override, bool mcu_override, bool dsp_override)
 625{
 626	struct sdma_engine *sdma = sdmac->sdma;
 627	int channel = sdmac->channel;
 628	unsigned long evt, mcu, dsp;
 629
 630	if (event_override && mcu_override && dsp_override)
 631		return -EINVAL;
 632
 633	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
 634	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
 635	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
 636
 637	if (dsp_override)
 638		__clear_bit(channel, &dsp);
 639	else
 640		__set_bit(channel, &dsp);
 641
 642	if (event_override)
 643		__clear_bit(channel, &evt);
 644	else
 645		__set_bit(channel, &evt);
 646
 647	if (mcu_override)
 648		__clear_bit(channel, &mcu);
 649	else
 650		__set_bit(channel, &mcu);
 651
 652	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
 653	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
 654	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
 655
 656	return 0;
 657}
 658
 659static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 660{
 661	writel(BIT(channel), sdma->regs + SDMA_H_START);
 662}
 663
 664/*
 665 * sdma_run_channel0 - run a channel and wait till it's done
 666 */
 667static int sdma_run_channel0(struct sdma_engine *sdma)
 668{
 669	int ret;
 670	u32 reg;
 671
 672	sdma_enable_channel(sdma, 0);
 673
 674	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
 675						reg, !(reg & 1), 1, 500);
 676	if (ret)
 677		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 678
 679	/* Set bits of CONFIG register with dynamic context switching */
 680	reg = readl(sdma->regs + SDMA_H_CONFIG);
 681	if ((reg & SDMA_H_CONFIG_CSM) == 0) {
 682		reg |= SDMA_H_CONFIG_CSM;
 683		writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
 684	}
 685
 686	return ret;
 687}
 688
 689static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 690		u32 address)
 691{
 692	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 693	void *buf_virt;
 694	dma_addr_t buf_phys;
 695	int ret;
 696	unsigned long flags;
 697
 698	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
 699	if (!buf_virt) {
 700		return -ENOMEM;
 701	}
 702
 703	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 704
 705	bd0->mode.command = C0_SETPM;
 706	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
 707	bd0->mode.count = size / 2;
 708	bd0->buffer_addr = buf_phys;
 709	bd0->ext_buffer_addr = address;
 710
 711	memcpy(buf_virt, buf, size);
 712
 713	ret = sdma_run_channel0(sdma);
 714
 715	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 716
 717	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
 718
 719	return ret;
 720}
 721
 722static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 723{
 724	struct sdma_engine *sdma = sdmac->sdma;
 725	int channel = sdmac->channel;
 726	unsigned long val;
 727	u32 chnenbl = chnenbl_ofs(sdma, event);
 728
 729	val = readl_relaxed(sdma->regs + chnenbl);
 730	__set_bit(channel, &val);
 731	writel_relaxed(val, sdma->regs + chnenbl);
 732}
 733
 734static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 735{
 736	struct sdma_engine *sdma = sdmac->sdma;
 737	int channel = sdmac->channel;
 738	u32 chnenbl = chnenbl_ofs(sdma, event);
 739	unsigned long val;
 740
 741	val = readl_relaxed(sdma->regs + chnenbl);
 742	__clear_bit(channel, &val);
 743	writel_relaxed(val, sdma->regs + chnenbl);
 744}
 745
 746static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
 747{
 748	return container_of(t, struct sdma_desc, vd.tx);
 749}
 750
 751static void sdma_start_desc(struct sdma_channel *sdmac)
 752{
 753	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
 754	struct sdma_desc *desc;
 755	struct sdma_engine *sdma = sdmac->sdma;
 756	int channel = sdmac->channel;
 757
 758	if (!vd) {
 759		sdmac->desc = NULL;
 760		return;
 761	}
 762	sdmac->desc = desc = to_sdma_desc(&vd->tx);
 763	/*
 764	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
 765	 * the desc allocated will never be freed in vchan_dma_desc_free_list
 766	 */
 767	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
 768		list_del(&vd->node);
 769
 770	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 771	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
 772	sdma_enable_channel(sdma, sdmac->channel);
 773}
 774
 775static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 776{
 777	struct sdma_buffer_descriptor *bd;
 778	int error = 0;
 779	enum dma_status	old_status = sdmac->status;
 780
 781	/*
  782	 * Loop mode: iterate over the descriptors, re-set them up and
  783	 * call the callback function.
 784	 */
 785	while (sdmac->desc) {
 786		struct sdma_desc *desc = sdmac->desc;
 787
 788		bd = &desc->bd[desc->buf_tail];
 789
 790		if (bd->mode.status & BD_DONE)
 791			break;
 792
 793		if (bd->mode.status & BD_RROR) {
 794			bd->mode.status &= ~BD_RROR;
 795			sdmac->status = DMA_ERROR;
 796			error = -EIO;
 797		}
 798
 799	       /*
  800		* We use bd->mode.count to calculate the residue, since it contains
 801		* the number of bytes present in the current buffer descriptor.
 802		*/
 803
 804		desc->chn_real_count = bd->mode.count;
 805		bd->mode.status |= BD_DONE;
 806		bd->mode.count = desc->period_len;
 807		desc->buf_ptail = desc->buf_tail;
 808		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
 809
 810		/*
 811		 * The callback is called from the interrupt context in order
 812		 * to reduce latency and to avoid the risk of altering the
 813		 * SDMA transaction status by the time the client tasklet is
 814		 * executed.
 815		 */
 816		spin_unlock(&sdmac->vc.lock);
 817		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
 818		spin_lock(&sdmac->vc.lock);
 819
 820		if (error)
 821			sdmac->status = old_status;
 822	}
 823}
 824
 825static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 826{
 827	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 828	struct sdma_buffer_descriptor *bd;
 829	int i, error = 0;
 830
 831	sdmac->desc->chn_real_count = 0;
 832	/*
  833	 * Non-loop mode: iterate over all descriptors, collect
  834	 * errors and call the callback function.
 835	 */
 836	for (i = 0; i < sdmac->desc->num_bd; i++) {
 837		bd = &sdmac->desc->bd[i];
 838
 839		 if (bd->mode.status & (BD_DONE | BD_RROR))
 840			error = -EIO;
 841		 sdmac->desc->chn_real_count += bd->mode.count;
 842	}
 843
 844	if (error)
 845		sdmac->status = DMA_ERROR;
 846	else
 847		sdmac->status = DMA_COMPLETE;
 848}
 849
 850static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 851{
 852	struct sdma_engine *sdma = dev_id;
 853	unsigned long stat;
 854
 855	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
 856	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
  857	/* channel 0 is special and not handled here, see sdma_run_channel0() */
 858	stat &= ~1;
 859
 860	while (stat) {
 861		int channel = fls(stat) - 1;
 862		struct sdma_channel *sdmac = &sdma->channel[channel];
 863		struct sdma_desc *desc;
 864
 865		spin_lock(&sdmac->vc.lock);
 866		desc = sdmac->desc;
 867		if (desc) {
 868			if (sdmac->flags & IMX_DMA_SG_LOOP) {
 869				sdma_update_channel_loop(sdmac);
 870			} else {
 871				mxc_sdma_handle_channel_normal(sdmac);
 872				vchan_cookie_complete(&desc->vd);
 873				sdma_start_desc(sdmac);
 874			}
 875		}
 876
 877		spin_unlock(&sdmac->vc.lock);
 878		__clear_bit(channel, &stat);
 879	}
 880
 881	return IRQ_HANDLED;
 882}
 883
 884/*
 885 * sets the pc of SDMA script according to the peripheral type
 886 */
 887static void sdma_get_pc(struct sdma_channel *sdmac,
 888		enum sdma_peripheral_type peripheral_type)
 889{
 890	struct sdma_engine *sdma = sdmac->sdma;
 891	int per_2_emi = 0, emi_2_per = 0;
 892	/*
 893	 * These are needed once we start to support transfers between
 894	 * two peripherals or memory-to-memory transfers
 895	 */
 896	int per_2_per = 0, emi_2_emi = 0;
 897
 898	sdmac->pc_from_device = 0;
 899	sdmac->pc_to_device = 0;
 900	sdmac->device_to_device = 0;
 901	sdmac->pc_to_pc = 0;
 902
 903	switch (peripheral_type) {
 904	case IMX_DMATYPE_MEMORY:
 905		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 906		break;
 907	case IMX_DMATYPE_DSP:
 908		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
 909		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
 910		break;
 911	case IMX_DMATYPE_FIRI:
 912		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
 913		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
 914		break;
 915	case IMX_DMATYPE_UART:
 916		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
 917		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 918		break;
 919	case IMX_DMATYPE_UART_SP:
 920		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
 921		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 922		break;
 923	case IMX_DMATYPE_ATA:
 924		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
 925		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
 926		break;
 927	case IMX_DMATYPE_CSPI:
 928	case IMX_DMATYPE_EXT:
 929	case IMX_DMATYPE_SSI:
 930	case IMX_DMATYPE_SAI:
 931		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
 932		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 933		break;
 934	case IMX_DMATYPE_SSI_DUAL:
 935		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
 936		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
 937		break;
 938	case IMX_DMATYPE_SSI_SP:
 939	case IMX_DMATYPE_MMC:
 940	case IMX_DMATYPE_SDHC:
 941	case IMX_DMATYPE_CSPI_SP:
 942	case IMX_DMATYPE_ESAI:
 943	case IMX_DMATYPE_MSHC_SP:
 944		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
 945		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 946		break;
 947	case IMX_DMATYPE_ASRC:
 948		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
 949		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
 950		per_2_per = sdma->script_addrs->per_2_per_addr;
 951		break;
 952	case IMX_DMATYPE_ASRC_SP:
 953		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
 954		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 955		per_2_per = sdma->script_addrs->per_2_per_addr;
 956		break;
 957	case IMX_DMATYPE_MSHC:
 958		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
 959		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
 960		break;
 961	case IMX_DMATYPE_CCM:
 962		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
 963		break;
 964	case IMX_DMATYPE_SPDIF:
 965		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
 966		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
 967		break;
 968	case IMX_DMATYPE_IPU_MEMORY:
 969		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
 970		break;
 971	default:
 972		break;
 973	}
 974
 975	sdmac->pc_from_device = per_2_emi;
 976	sdmac->pc_to_device = emi_2_per;
 977	sdmac->device_to_device = per_2_per;
 978	sdmac->pc_to_pc = emi_2_emi;
 979}
 980
 981static int sdma_load_context(struct sdma_channel *sdmac)
 982{
 983	struct sdma_engine *sdma = sdmac->sdma;
 984	int channel = sdmac->channel;
 985	int load_address;
 986	struct sdma_context_data *context = sdma->context;
 987	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 988	int ret;
 989	unsigned long flags;
 990
 991	if (sdmac->context_loaded)
 992		return 0;
 993
 994	if (sdmac->direction == DMA_DEV_TO_MEM)
 995		load_address = sdmac->pc_from_device;
 996	else if (sdmac->direction == DMA_DEV_TO_DEV)
 997		load_address = sdmac->device_to_device;
 998	else if (sdmac->direction == DMA_MEM_TO_MEM)
 999		load_address = sdmac->pc_to_pc;
1000	else
1001		load_address = sdmac->pc_to_device;
1002
1003	if (load_address < 0)
1004		return load_address;
1005
1006	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
1007	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
1008	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
1009	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
1010	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
1011	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
1012
1013	spin_lock_irqsave(&sdma->channel_0_lock, flags);
1014
1015	memset(context, 0, sizeof(*context));
1016	context->channel_state.pc = load_address;
1017
 1018	/* Send, via the channel context, the event mask, the peripheral base
 1019	 * address and the watermark level
1020	 */
1021	context->gReg[0] = sdmac->event_mask[1];
1022	context->gReg[1] = sdmac->event_mask[0];
1023	context->gReg[2] = sdmac->per_addr;
1024	context->gReg[6] = sdmac->shp_addr;
1025	context->gReg[7] = sdmac->watermark_level;
1026
1027	bd0->mode.command = C0_SETDM;
1028	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
1029	bd0->mode.count = sizeof(*context) / 4;
1030	bd0->buffer_addr = sdma->context_phys;
1031	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
1032	ret = sdma_run_channel0(sdma);
1033
1034	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
1035
1036	sdmac->context_loaded = true;
1037
1038	return ret;
1039}
1040
1041static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
1042{
1043	return container_of(chan, struct sdma_channel, vc.chan);
1044}
1045
1046static int sdma_disable_channel(struct dma_chan *chan)
1047{
1048	struct sdma_channel *sdmac = to_sdma_chan(chan);
1049	struct sdma_engine *sdma = sdmac->sdma;
1050	int channel = sdmac->channel;
1051
1052	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
1053	sdmac->status = DMA_ERROR;
1054
1055	return 0;
1056}
1057static void sdma_channel_terminate_work(struct work_struct *work)
1058{
1059	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
1060						  terminate_worker);
1061	unsigned long flags;
1062	LIST_HEAD(head);
1063
1064	/*
 1065	 * According to the NXP R&D team, a delay of one BD's SDMA processing
 1066	 * time (at most 1 ms) should be added after clearing the channel enable
 1067	 * bit, to ensure the SDMA core has really stopped after SDMA clients
 1068	 * call .device_terminate_all.
1069	 */
1070	usleep_range(1000, 2000);
1071
1072	spin_lock_irqsave(&sdmac->vc.lock, flags);
1073	vchan_get_all_descriptors(&sdmac->vc, &head);
1074	sdmac->desc = NULL;
1075	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1076	vchan_dma_desc_free_list(&sdmac->vc, &head);
1077	sdmac->context_loaded = false;
1078}
1079
1080static int sdma_disable_channel_async(struct dma_chan *chan)
1081{
1082	struct sdma_channel *sdmac = to_sdma_chan(chan);
1083
1084	sdma_disable_channel(chan);
1085
1086	if (sdmac->desc)
1087		schedule_work(&sdmac->terminate_worker);
1088
1089	return 0;
1090}
1091
1092static void sdma_channel_synchronize(struct dma_chan *chan)
1093{
1094	struct sdma_channel *sdmac = to_sdma_chan(chan);
1095
1096	vchan_synchronize(&sdmac->vc);
1097
1098	flush_work(&sdmac->terminate_worker);
1099}
1100
1101static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
1102{
1103	struct sdma_engine *sdma = sdmac->sdma;
1104
1105	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
1106	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
1107
1108	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
1109	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
1110
1111	if (sdmac->event_id0 > 31)
1112		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
1113
1114	if (sdmac->event_id1 > 31)
1115		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
1116
1117	/*
 1118	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to
 1119	 * swap LWML and HWML in INFO(A.3.2.5.1), and also swap
 1120	 * r0(event_mask[1]) and r1(event_mask[0]).
1121	 */
1122	if (lwml > hwml) {
1123		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
1124						SDMA_WATERMARK_LEVEL_HWML);
1125		sdmac->watermark_level |= hwml;
1126		sdmac->watermark_level |= lwml << 16;
1127		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
1128	}
1129
1130	if (sdmac->per_address2 >= sdma->spba_start_addr &&
1131			sdmac->per_address2 <= sdma->spba_end_addr)
1132		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
1133
1134	if (sdmac->per_address >= sdma->spba_start_addr &&
1135			sdmac->per_address <= sdma->spba_end_addr)
1136		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
1137
1138	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
1139}
1140
1141static int sdma_config_channel(struct dma_chan *chan)
1142{
1143	struct sdma_channel *sdmac = to_sdma_chan(chan);
1144	int ret;
1145
1146	sdma_disable_channel(chan);
1147
1148	sdmac->event_mask[0] = 0;
1149	sdmac->event_mask[1] = 0;
1150	sdmac->shp_addr = 0;
1151	sdmac->per_addr = 0;
1152
1153	switch (sdmac->peripheral_type) {
1154	case IMX_DMATYPE_DSP:
1155		sdma_config_ownership(sdmac, false, true, true);
1156		break;
1157	case IMX_DMATYPE_MEMORY:
1158		sdma_config_ownership(sdmac, false, true, false);
1159		break;
1160	default:
1161		sdma_config_ownership(sdmac, true, true, false);
1162		break;
1163	}
1164
1165	sdma_get_pc(sdmac, sdmac->peripheral_type);
1166
1167	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1168			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1169		/* Handle multiple event channels differently */
1170		if (sdmac->event_id1) {
1171			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1172			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1173				sdma_set_watermarklevel_for_p2p(sdmac);
1174		} else
1175			__set_bit(sdmac->event_id0, sdmac->event_mask);
1176
1177		/* Address */
1178		sdmac->shp_addr = sdmac->per_address;
1179		sdmac->per_addr = sdmac->per_address2;
1180	} else {
1181		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
1182	}
1183
1184	ret = sdma_load_context(sdmac);
1185
1186	return ret;
1187}
1188
1189static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1190		unsigned int priority)
1191{
1192	struct sdma_engine *sdma = sdmac->sdma;
1193	int channel = sdmac->channel;
1194
1195	if (priority < MXC_SDMA_MIN_PRIORITY
1196	    || priority > MXC_SDMA_MAX_PRIORITY) {
1197		return -EINVAL;
1198	}
1199
1200	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1201
1202	return 0;
1203}
1204
1205static int sdma_request_channel0(struct sdma_engine *sdma)
1206{
1207	int ret = -EBUSY;
1208
1209	sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
1210					GFP_NOWAIT);
1211	if (!sdma->bd0) {
1212		ret = -ENOMEM;
1213		goto out;
1214	}
1215
1216	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
1217	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
1218
1219	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
1220	return 0;
1221out:
1222
1223	return ret;
1224}
1225
1226
1227static int sdma_alloc_bd(struct sdma_desc *desc)
1228{
1229	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1230	int ret = 0;
1231
1232	desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
1233				       &desc->bd_phys, GFP_NOWAIT);
1234	if (!desc->bd) {
1235		ret = -ENOMEM;
1236		goto out;
1237	}
1238out:
1239	return ret;
1240}
1241
1242static void sdma_free_bd(struct sdma_desc *desc)
1243{
1244	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1245
1246	dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
1247			  desc->bd_phys);
1248}
1249
1250static void sdma_desc_free(struct virt_dma_desc *vd)
1251{
1252	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
1253
1254	sdma_free_bd(desc);
1255	kfree(desc);
1256}
1257
1258static int sdma_alloc_chan_resources(struct dma_chan *chan)
1259{
1260	struct sdma_channel *sdmac = to_sdma_chan(chan);
1261	struct imx_dma_data *data = chan->private;
1262	struct imx_dma_data mem_data;
1263	int prio, ret;
1264
1265	/*
 1266	 * MEMCPY users (such as dmatest) may never set up chan->private through a
 1267	 * filter function, so create 'struct imx_dma_data mem_data' for this case.
 1268	 * Note that in any other (slave) case you must set up chan->private with a
 1269	 * 'struct imx_dma_data' in your own filter function if you request the
 1270	 * channel with dma_request_channel() rather than dma_request_slave_channel()
 1271	 * (a filter of this kind is sketched after this function). Otherwise,
 1272	 * 'MEMCPY in case?' below warns you to correct your filter function.
1273	 */
1274	if (!data) {
1275		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
1276		mem_data.priority = 2;
1277		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
1278		mem_data.dma_request = 0;
1279		mem_data.dma_request2 = 0;
1280		data = &mem_data;
1281
1282		sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
1283	}
1284
1285	switch (data->priority) {
1286	case DMA_PRIO_HIGH:
1287		prio = 3;
1288		break;
1289	case DMA_PRIO_MEDIUM:
1290		prio = 2;
1291		break;
1292	case DMA_PRIO_LOW:
1293	default:
1294		prio = 1;
1295		break;
1296	}
1297
1298	sdmac->peripheral_type = data->peripheral_type;
1299	sdmac->event_id0 = data->dma_request;
1300	sdmac->event_id1 = data->dma_request2;
1301
1302	ret = clk_enable(sdmac->sdma->clk_ipg);
1303	if (ret)
1304		return ret;
1305	ret = clk_enable(sdmac->sdma->clk_ahb);
1306	if (ret)
1307		goto disable_clk_ipg;
1308
1309	ret = sdma_set_channel_priority(sdmac, prio);
1310	if (ret)
1311		goto disable_clk_ahb;
1312
1313	return 0;
1314
1315disable_clk_ahb:
1316	clk_disable(sdmac->sdma->clk_ahb);
1317disable_clk_ipg:
1318	clk_disable(sdmac->sdma->clk_ipg);
1319	return ret;
1320}
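/*
 * Illustrative sketch, not part of the original driver: a client-side filter
 * function of the kind referred to in the comment inside
 * sdma_alloc_chan_resources() above, for requesting an SDMA channel with
 * dma_request_channel() instead of dma_request_slave_channel(). The function
 * name and the usage below are made up for illustration only.
 */
static bool example_sdma_filter(struct dma_chan *chan, void *param)
{
	struct imx_dma_data *data = param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	/* hand the request description (dma_request, peripheral_type,
	 * priority) to the SDMA driver via chan->private */
	chan->private = data;

	return true;
}
/*
 * Typical use: fill a struct imx_dma_data, set DMA_SLAVE in a dma_cap_mask_t
 * and call dma_request_channel(mask, example_sdma_filter, &data).
 */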
1321
1322static void sdma_free_chan_resources(struct dma_chan *chan)
1323{
1324	struct sdma_channel *sdmac = to_sdma_chan(chan);
1325	struct sdma_engine *sdma = sdmac->sdma;
1326
1327	sdma_disable_channel_async(chan);
1328
1329	sdma_channel_synchronize(chan);
1330
1331	if (sdmac->event_id0)
1332		sdma_event_disable(sdmac, sdmac->event_id0);
1333	if (sdmac->event_id1)
1334		sdma_event_disable(sdmac, sdmac->event_id1);
1335
1336	sdmac->event_id0 = 0;
1337	sdmac->event_id1 = 0;
1338
1339	sdma_set_channel_priority(sdmac, 0);
1340
1341	clk_disable(sdma->clk_ipg);
1342	clk_disable(sdma->clk_ahb);
1343}
1344
1345static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
1346				enum dma_transfer_direction direction, u32 bds)
1347{
1348	struct sdma_desc *desc;
1349
1350	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
1351	if (!desc)
1352		goto err_out;
1353
1354	sdmac->status = DMA_IN_PROGRESS;
1355	sdmac->direction = direction;
1356	sdmac->flags = 0;
1357
1358	desc->chn_count = 0;
1359	desc->chn_real_count = 0;
1360	desc->buf_tail = 0;
1361	desc->buf_ptail = 0;
1362	desc->sdmac = sdmac;
1363	desc->num_bd = bds;
1364
1365	if (sdma_alloc_bd(desc))
1366		goto err_desc_out;
1367
 1368	/* No slave_config is called in the MEMCPY case, so do it here */
1369	if (direction == DMA_MEM_TO_MEM)
1370		sdma_config_ownership(sdmac, false, true, false);
1371
1372	if (sdma_load_context(sdmac))
1373		goto err_desc_out;
1374
1375	return desc;
1376
1377err_desc_out:
1378	kfree(desc);
1379err_out:
1380	return NULL;
1381}
1382
1383static struct dma_async_tx_descriptor *sdma_prep_memcpy(
1384		struct dma_chan *chan, dma_addr_t dma_dst,
1385		dma_addr_t dma_src, size_t len, unsigned long flags)
1386{
1387	struct sdma_channel *sdmac = to_sdma_chan(chan);
1388	struct sdma_engine *sdma = sdmac->sdma;
1389	int channel = sdmac->channel;
1390	size_t count;
1391	int i = 0, param;
1392	struct sdma_buffer_descriptor *bd;
1393	struct sdma_desc *desc;
1394
1395	if (!chan || !len)
1396		return NULL;
1397
1398	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
1399		&dma_src, &dma_dst, len, channel);
1400
1401	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
1402					len / SDMA_BD_MAX_CNT + 1);
1403	if (!desc)
1404		return NULL;
1405
1406	do {
1407		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
1408		bd = &desc->bd[i];
1409		bd->buffer_addr = dma_src;
1410		bd->ext_buffer_addr = dma_dst;
1411		bd->mode.count = count;
1412		desc->chn_count += count;
1413		bd->mode.command = 0;
1414
1415		dma_src += count;
1416		dma_dst += count;
1417		len -= count;
1418		i++;
1419
1420		param = BD_DONE | BD_EXTD | BD_CONT;
1421		/* last bd */
1422		if (!len) {
1423			param |= BD_INTR;
1424			param |= BD_LAST;
1425			param &= ~BD_CONT;
1426		}
1427
1428		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
1429				i, count, bd->buffer_addr,
1430				param & BD_WRAP ? "wrap" : "",
1431				param & BD_INTR ? " intr" : "");
1432
1433		bd->mode.status = param;
1434	} while (len);
1435
1436	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1437}
1438
1439static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1440		struct dma_chan *chan, struct scatterlist *sgl,
1441		unsigned int sg_len, enum dma_transfer_direction direction,
1442		unsigned long flags, void *context)
1443{
1444	struct sdma_channel *sdmac = to_sdma_chan(chan);
1445	struct sdma_engine *sdma = sdmac->sdma;
1446	int i, count;
1447	int channel = sdmac->channel;
1448	struct scatterlist *sg;
1449	struct sdma_desc *desc;
1450
1451	sdma_config_write(chan, &sdmac->slave_config, direction);
1452
1453	desc = sdma_transfer_init(sdmac, direction, sg_len);
1454	if (!desc)
1455		goto err_out;
1456
1457	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1458			sg_len, channel);
1459
1460	for_each_sg(sgl, sg, sg_len, i) {
1461		struct sdma_buffer_descriptor *bd = &desc->bd[i];
1462		int param;
1463
1464		bd->buffer_addr = sg->dma_address;
1465
1466		count = sg_dma_len(sg);
1467
1468		if (count > SDMA_BD_MAX_CNT) {
1469			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1470					channel, count, SDMA_BD_MAX_CNT);
1471			goto err_bd_out;
1472		}
1473
1474		bd->mode.count = count;
1475		desc->chn_count += count;
1476
1477		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1478			goto err_bd_out;
1479
1480		switch (sdmac->word_size) {
1481		case DMA_SLAVE_BUSWIDTH_4_BYTES:
1482			bd->mode.command = 0;
1483			if (count & 3 || sg->dma_address & 3)
1484				goto err_bd_out;
1485			break;
1486		case DMA_SLAVE_BUSWIDTH_2_BYTES:
1487			bd->mode.command = 2;
1488			if (count & 1 || sg->dma_address & 1)
1489				goto err_bd_out;
1490			break;
1491		case DMA_SLAVE_BUSWIDTH_1_BYTE:
1492			bd->mode.command = 1;
1493			break;
1494		default:
1495			goto err_bd_out;
1496		}
1497
1498		param = BD_DONE | BD_EXTD | BD_CONT;
1499
1500		if (i + 1 == sg_len) {
1501			param |= BD_INTR;
1502			param |= BD_LAST;
1503			param &= ~BD_CONT;
1504		}
1505
1506		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1507				i, count, (u64)sg->dma_address,
1508				param & BD_WRAP ? "wrap" : "",
1509				param & BD_INTR ? " intr" : "");
1510
1511		bd->mode.status = param;
1512	}
1513
1514	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1515err_bd_out:
1516	sdma_free_bd(desc);
1517	kfree(desc);
1518err_out:
1519	sdmac->status = DMA_ERROR;
1520	return NULL;
1521}
1522
1523static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1524		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1525		size_t period_len, enum dma_transfer_direction direction,
1526		unsigned long flags)
1527{
1528	struct sdma_channel *sdmac = to_sdma_chan(chan);
1529	struct sdma_engine *sdma = sdmac->sdma;
1530	int num_periods = buf_len / period_len;
1531	int channel = sdmac->channel;
1532	int i = 0, buf = 0;
1533	struct sdma_desc *desc;
1534
1535	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1536
1537	sdma_config_write(chan, &sdmac->slave_config, direction);
1538
1539	desc = sdma_transfer_init(sdmac, direction, num_periods);
1540	if (!desc)
1541		goto err_out;
1542
1543	desc->period_len = period_len;
1544
1545	sdmac->flags |= IMX_DMA_SG_LOOP;
1546
1547	if (period_len > SDMA_BD_MAX_CNT) {
1548		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1549				channel, period_len, SDMA_BD_MAX_CNT);
1550		goto err_bd_out;
1551	}
1552
1553	while (buf < buf_len) {
1554		struct sdma_buffer_descriptor *bd = &desc->bd[i];
1555		int param;
1556
1557		bd->buffer_addr = dma_addr;
1558
1559		bd->mode.count = period_len;
1560
1561		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1562			goto err_bd_out;
1563		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1564			bd->mode.command = 0;
1565		else
1566			bd->mode.command = sdmac->word_size;
1567
1568		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1569		if (i + 1 == num_periods)
1570			param |= BD_WRAP;
1571
1572		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
1573				i, period_len, (u64)dma_addr,
1574				param & BD_WRAP ? "wrap" : "",
1575				param & BD_INTR ? " intr" : "");
1576
1577		bd->mode.status = param;
1578
1579		dma_addr += period_len;
1580		buf += period_len;
1581
1582		i++;
1583	}
1584
1585	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1586err_bd_out:
1587	sdma_free_bd(desc);
1588	kfree(desc);
1589err_out:
1590	sdmac->status = DMA_ERROR;
1591	return NULL;
1592}
1593
1594static int sdma_config_write(struct dma_chan *chan,
1595		       struct dma_slave_config *dmaengine_cfg,
1596		       enum dma_transfer_direction direction)
1597{
1598	struct sdma_channel *sdmac = to_sdma_chan(chan);
1599
1600	if (direction == DMA_DEV_TO_MEM) {
1601		sdmac->per_address = dmaengine_cfg->src_addr;
1602		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1603			dmaengine_cfg->src_addr_width;
1604		sdmac->word_size = dmaengine_cfg->src_addr_width;
1605	} else if (direction == DMA_DEV_TO_DEV) {
1606		sdmac->per_address2 = dmaengine_cfg->src_addr;
1607		sdmac->per_address = dmaengine_cfg->dst_addr;
1608		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1609			SDMA_WATERMARK_LEVEL_LWML;
1610		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1611			SDMA_WATERMARK_LEVEL_HWML;
1612		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1613	} else {
1614		sdmac->per_address = dmaengine_cfg->dst_addr;
1615		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1616			dmaengine_cfg->dst_addr_width;
1617		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1618	}
1619	sdmac->direction = direction;
1620	return sdma_config_channel(chan);
1621}
1622
1623static int sdma_config(struct dma_chan *chan,
1624		       struct dma_slave_config *dmaengine_cfg)
1625{
1626	struct sdma_channel *sdmac = to_sdma_chan(chan);
1627
1628	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
1629
 1630	/* Set ENBLn earlier to make sure the DMA request is triggered after it */
1631	if (sdmac->event_id0) {
1632		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
1633			return -EINVAL;
1634		sdma_event_enable(sdmac, sdmac->event_id0);
1635	}
1636
1637	if (sdmac->event_id1) {
1638		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1639			return -EINVAL;
1640		sdma_event_enable(sdmac, sdmac->event_id1);
1641	}
1642
1643	return 0;
1644}
1645
1646static enum dma_status sdma_tx_status(struct dma_chan *chan,
1647				      dma_cookie_t cookie,
1648				      struct dma_tx_state *txstate)
1649{
1650	struct sdma_channel *sdmac = to_sdma_chan(chan);
1651	struct sdma_desc *desc;
1652	u32 residue;
1653	struct virt_dma_desc *vd;
1654	enum dma_status ret;
1655	unsigned long flags;
1656
1657	ret = dma_cookie_status(chan, cookie, txstate);
1658	if (ret == DMA_COMPLETE || !txstate)
1659		return ret;
1660
1661	spin_lock_irqsave(&sdmac->vc.lock, flags);
1662	vd = vchan_find_desc(&sdmac->vc, cookie);
1663	if (vd) {
1664		desc = to_sdma_desc(&vd->tx);
1665		if (sdmac->flags & IMX_DMA_SG_LOOP)
1666			residue = (desc->num_bd - desc->buf_ptail) *
1667				desc->period_len - desc->chn_real_count;
1668		else
1669			residue = desc->chn_count - desc->chn_real_count;
1670	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
1671		residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
1672	} else {
1673		residue = 0;
1674	}
1675	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1676
1677	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1678			 residue);
1679
1680	return sdmac->status;
1681}
1682
1683static void sdma_issue_pending(struct dma_chan *chan)
1684{
1685	struct sdma_channel *sdmac = to_sdma_chan(chan);
1686	unsigned long flags;
1687
1688	spin_lock_irqsave(&sdmac->vc.lock, flags);
1689	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1690		sdma_start_desc(sdmac);
1691	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1692}
1693
1694#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
1695#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
1696#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41
1697#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	42
1698
1699static void sdma_add_scripts(struct sdma_engine *sdma,
1700		const struct sdma_script_start_addrs *addr)
1701{
1702	s32 *addr_arr = (u32 *)addr;
1703	s32 *saddr_arr = (u32 *)sdma->script_addrs;
1704	int i;
1705
1706	/* use the default firmware in ROM if missing external firmware */
1707	if (!sdma->script_number)
1708		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1709
1710	if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
1711				  / sizeof(s32)) {
1712		dev_err(sdma->dev,
 1713			"SDMA script number %d does not match firmware.\n",
1714			sdma->script_number);
1715		return;
1716	}
1717
1718	for (i = 0; i < sdma->script_number; i++)
1719		if (addr_arr[i] > 0)
1720			saddr_arr[i] = addr_arr[i];
1721}
1722
1723static void sdma_load_firmware(const struct firmware *fw, void *context)
1724{
1725	struct sdma_engine *sdma = context;
1726	const struct sdma_firmware_header *header;
1727	const struct sdma_script_start_addrs *addr;
1728	unsigned short *ram_code;
1729
1730	if (!fw) {
1731		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1732		/* In this case we just use the ROM firmware. */
1733		return;
1734	}
1735
1736	if (fw->size < sizeof(*header))
1737		goto err_firmware;
1738
1739	header = (struct sdma_firmware_header *)fw->data;
1740
1741	if (header->magic != SDMA_FIRMWARE_MAGIC)
1742		goto err_firmware;
1743	if (header->ram_code_start + header->ram_code_size > fw->size)
1744		goto err_firmware;
1745	switch (header->version_major) {
1746	case 1:
1747		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1748		break;
1749	case 2:
1750		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
1751		break;
1752	case 3:
1753		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
1754		break;
1755	case 4:
1756		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
1757		break;
1758	default:
1759		dev_err(sdma->dev, "unknown firmware version\n");
1760		goto err_firmware;
1761	}
1762
1763	addr = (void *)header + header->script_addrs_start;
1764	ram_code = (void *)header + header->ram_code_start;
1765
1766	clk_enable(sdma->clk_ipg);
1767	clk_enable(sdma->clk_ahb);
1768	/* download the RAM image for SDMA */
1769	sdma_load_script(sdma, ram_code,
1770			header->ram_code_size,
1771			addr->ram_code_start_addr);
1772	clk_disable(sdma->clk_ipg);
1773	clk_disable(sdma->clk_ahb);
1774
1775	sdma_add_scripts(sdma, addr);
1776
1777	dev_info(sdma->dev, "loaded firmware %d.%d\n",
1778			header->version_major,
1779			header->version_minor);
1780
1781err_firmware:
1782	release_firmware(fw);
1783}
1784
1785#define EVENT_REMAP_CELLS 3
1786
1787static int sdma_event_remap(struct sdma_engine *sdma)
1788{
1789	struct device_node *np = sdma->dev->of_node;
1790	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1791	struct property *event_remap;
1792	struct regmap *gpr;
1793	char propname[] = "fsl,sdma-event-remap";
1794	u32 reg, val, shift, num_map, i;
1795	int ret = 0;
1796
1797	if (IS_ERR(np) || IS_ERR(gpr_np))
1798		goto out;
1799
1800	event_remap = of_find_property(np, propname, NULL);
1801	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
1802	if (!num_map) {
1803		dev_dbg(sdma->dev, "no event needs to be remapped\n");
1804		goto out;
1805	} else if (num_map % EVENT_REMAP_CELLS) {
 1806		dev_err(sdma->dev, "the number of cells in property %s must be a multiple of %d\n",
1807				propname, EVENT_REMAP_CELLS);
1808		ret = -EINVAL;
1809		goto out;
1810	}
1811
1812	gpr = syscon_node_to_regmap(gpr_np);
1813	if (IS_ERR(gpr)) {
1814		dev_err(sdma->dev, "failed to get gpr regmap\n");
1815		ret = PTR_ERR(gpr);
1816		goto out;
1817	}
1818
1819	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
1820		ret = of_property_read_u32_index(np, propname, i, &reg);
1821		if (ret) {
1822			dev_err(sdma->dev, "failed to read property %s index %d\n",
1823					propname, i);
1824			goto out;
1825		}
1826
1827		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
1828		if (ret) {
1829			dev_err(sdma->dev, "failed to read property %s index %d\n",
1830					propname, i + 1);
1831			goto out;
1832		}
1833
1834		ret = of_property_read_u32_index(np, propname, i + 2, &val);
1835		if (ret) {
1836			dev_err(sdma->dev, "failed to read property %s index %d\n",
1837					propname, i + 2);
1838			goto out;
1839		}
1840
1841		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
1842	}
1843
1844out:
1845	if (!IS_ERR(gpr_np))
1846		of_node_put(gpr_np);
1847
1848	return ret;
1849}
1850
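/*
 * Editor's illustrative sketch (not part of the driver): how each
 * <reg shift val> triplet of the "fsl,sdma-event-remap" property translates
 * into a single-bit update of the GPR block, matching the
 * regmap_update_bits() call in sdma_event_remap() above. The sample triplets
 * in main() are made up purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static void show_event_remap(const uint32_t *cells, unsigned int num_cells)
{
	unsigned int i;

	for (i = 0; i + 2 < num_cells; i += 3) {
		uint32_t reg = cells[i];
		uint32_t shift = cells[i + 1];
		uint32_t val = cells[i + 2];

		/* regmap_update_bits(gpr, reg, BIT(shift), val << shift) */
		printf("GPR offset 0x%02x: mask 0x%08x, value 0x%08x\n",
		       reg, 1u << shift, val << shift);
	}
}

int main(void)
{
	/* hypothetical property: set one remap bit, clear another */
	static const uint32_t remap[] = { 0x0, 13, 1, 0x0, 14, 0 };

	show_event_remap(remap, 6);
	return 0;
}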
1851static int sdma_get_firmware(struct sdma_engine *sdma,
1852		const char *fw_name)
1853{
1854	int ret;
1855
1856	ret = request_firmware_nowait(THIS_MODULE,
1857			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1858			GFP_KERNEL, sdma, sdma_load_firmware);
1859
1860	return ret;
1861}
1862
1863static int sdma_init(struct sdma_engine *sdma)
1864{
1865	int i, ret;
1866	dma_addr_t ccb_phys;
1867
1868	ret = clk_enable(sdma->clk_ipg);
1869	if (ret)
1870		return ret;
1871	ret = clk_enable(sdma->clk_ahb);
1872	if (ret)
1873		goto disable_clk_ipg;
1874
1875	if (sdma->drvdata->check_ratio &&
1876	    (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
1877		sdma->clk_ratio = 1;
1878
1879	/* Be sure SDMA has not started yet */
1880	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1881
1882	sdma->channel_control = dma_alloc_coherent(sdma->dev,
1883			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1884			sizeof(struct sdma_context_data),
1885			&ccb_phys, GFP_KERNEL);
1886
1887	if (!sdma->channel_control) {
1888		ret = -ENOMEM;
1889		goto err_dma_alloc;
1890	}
1891
1892	sdma->context = (void *)sdma->channel_control +
1893		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1894	sdma->context_phys = ccb_phys +
1895		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1896
 
 
 
 
1897	/* disable all channels */
1898	for (i = 0; i < sdma->drvdata->num_events; i++)
1899		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1900
1901	/* All channels have priority 0 */
1902	for (i = 0; i < MAX_DMA_CHANNELS; i++)
1903		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1904
1905	ret = sdma_request_channel0(sdma);
1906	if (ret)
1907		goto err_dma_alloc;
1908
1909	sdma_config_ownership(&sdma->channel[0], false, true, false);
1910
1911	/* Set Command Channel (Channel Zero) */
1912	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1913
1914	/* Set bits of CONFIG register but with static context switching */
1915	if (sdma->clk_ratio)
1916		writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
1917	else
1918		writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1919
1920	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1921
1922	/* Initialize channel 0 priority */
1923	sdma_set_channel_priority(&sdma->channel[0], 7);
1924
1925	clk_disable(sdma->clk_ipg);
1926	clk_disable(sdma->clk_ahb);
1927
1928	return 0;
1929
1930err_dma_alloc:
1931	clk_disable(sdma->clk_ahb);
1932disable_clk_ipg:
1933	clk_disable(sdma->clk_ipg);
1934	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1935	return ret;
1936}
1937
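/*
 * Editor's illustrative sketch (not part of the driver): the single coherent
 * allocation made in sdma_init() holds the 32 channel control blocks first
 * and one struct sdma_context_data directly behind them. The stand-in struct
 * below follows the packed sdma_channel_control definition later in this
 * file (4 x u32 per control block); only the offsets matter here.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_DMA_CHANNELS	32

struct ccb {			/* stands in for struct sdma_channel_control */
	uint32_t current_bd_ptr;
	uint32_t base_bd_ptr;
	uint32_t unused[2];
};

int main(void)
{
	size_t ccb_area = MAX_DMA_CHANNELS * sizeof(struct ccb);

	/* sdma->context = (void *)sdma->channel_control + ccb_area */
	printf("control block area: %zu bytes\n", ccb_area);
	printf("context starts at offset %zu (context_phys = ccb_phys + %zu)\n",
	       ccb_area, ccb_area);
	return 0;
}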
1938static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1939{
1940	struct sdma_channel *sdmac = to_sdma_chan(chan);
1941	struct imx_dma_data *data = fn_param;
1942
1943	if (!imx_dma_is_general_purpose(chan))
1944		return false;
1945
1946	sdmac->data = *data;
1947	chan->private = &sdmac->data;
1948
1949	return true;
1950}
1951
1952static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1953				   struct of_dma *ofdma)
1954{
1955	struct sdma_engine *sdma = ofdma->of_dma_data;
1956	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
1957	struct imx_dma_data data;
1958
1959	if (dma_spec->args_count != 3)
1960		return NULL;
1961
1962	data.dma_request = dma_spec->args[0];
1963	data.peripheral_type = dma_spec->args[1];
1964	data.priority = dma_spec->args[2];
1965	/*
1966	 * Initialize dma_request2 to zero; it is not encoded in the
1967	 * device tree. For P2P, dma_request2 is set up via
1968	 * dma_request_channel(): chan->private will point to the
1969	 * imx_dma_data, and in device_alloc_chan_resources()
1970	 * imx_dma_data.dma_request2 will be set to sdmac->event_id1.
1971	 */
1972	data.dma_request2 = 0;
1973
1974	return __dma_request_channel(&mask, sdma_filter_fn, &data,
1975				     ofdma->of_node);
1976}
1977
1978static int sdma_probe(struct platform_device *pdev)
1979{
1980	const struct of_device_id *of_id =
1981			of_match_device(sdma_dt_ids, &pdev->dev);
1982	struct device_node *np = pdev->dev.of_node;
1983	struct device_node *spba_bus;
1984	const char *fw_name;
1985	int ret;
1986	int irq;
1987	struct resource *iores;
1988	struct resource spba_res;
1989	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1990	int i;
1991	struct sdma_engine *sdma;
1992	s32 *saddr_arr;
1993	const struct sdma_driver_data *drvdata = NULL;
1994
1995	if (of_id)
1996		drvdata = of_id->data;
1997	else if (pdev->id_entry)
1998		drvdata = (void *)pdev->id_entry->driver_data;
1999
2000	if (!drvdata) {
2001		dev_err(&pdev->dev, "unable to find driver data\n");
2002		return -EINVAL;
2003	}
2004
2005	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2006	if (ret)
2007		return ret;
2008
2009	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
2010	if (!sdma)
2011		return -ENOMEM;
2012
2013	spin_lock_init(&sdma->channel_0_lock);
2014
2015	sdma->dev = &pdev->dev;
2016	sdma->drvdata = drvdata;
2017
2018	irq = platform_get_irq(pdev, 0);
2019	if (irq < 0)
2020		return irq;
2021
2022	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2023	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
2024	if (IS_ERR(sdma->regs))
2025		return PTR_ERR(sdma->regs);
2026
2027	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2028	if (IS_ERR(sdma->clk_ipg))
2029		return PTR_ERR(sdma->clk_ipg);
2030
2031	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2032	if (IS_ERR(sdma->clk_ahb))
2033		return PTR_ERR(sdma->clk_ahb);
2034
2035	ret = clk_prepare(sdma->clk_ipg);
2036	if (ret)
2037		return ret;
2038
2039	ret = clk_prepare(sdma->clk_ahb);
2040	if (ret)
2041		goto err_clk;
2042
2043	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
2044			       sdma);
2045	if (ret)
2046		goto err_irq;
2047
2048	sdma->irq = irq;
2049
2050	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
2051	if (!sdma->script_addrs) {
2052		ret = -ENOMEM;
2053		goto err_irq;
2054	}
2055
2056	/* initially no scripts available */
2057	saddr_arr = (s32 *)sdma->script_addrs;
2058	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
2059		saddr_arr[i] = -EINVAL;
2060
2061	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
2062	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
2063	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
2064
2065	INIT_LIST_HEAD(&sdma->dma_device.channels);
2066	/* Initialize channel parameters */
2067	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2068		struct sdma_channel *sdmac = &sdma->channel[i];
2069
2070		sdmac->sdma = sdma;
 
2071
 
 
2072		sdmac->channel = i;
2073		sdmac->vc.desc_free = sdma_desc_free;
2074		INIT_WORK(&sdmac->terminate_worker,
2075				sdma_channel_terminate_work);
2076		/*
2077		 * Add the channel to the DMAC list. Do not add channel 0 though
2078		 * because we need it internally in the SDMA driver. This also means
2079		 * that channel 0 in dmaengine counting matches sdma channel 1.
2080		 */
2081		if (i)
2082			vchan_init(&sdmac->vc, &sdma->dma_device);
 
2083	}
2084
2085	ret = sdma_init(sdma);
2086	if (ret)
2087		goto err_init;
2088
2089	ret = sdma_event_remap(sdma);
2090	if (ret)
2091		goto err_init;
2092
2093	if (sdma->drvdata->script_addrs)
2094		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
2095	if (pdata && pdata->script_addrs)
2096		sdma_add_scripts(sdma, pdata->script_addrs);
2097
2098	sdma->dma_device.dev = &pdev->dev;
2099
2100	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
2101	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
2102	sdma->dma_device.device_tx_status = sdma_tx_status;
2103	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
2104	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
2105	sdma->dma_device.device_config = sdma_config;
2106	sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
2107	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
2108	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
2109	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
2110	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
2111	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2112	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
2113	sdma->dma_device.device_issue_pending = sdma_issue_pending;
2114	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
2115	sdma->dma_device.copy_align = 2;
2116	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
2117
2118	platform_set_drvdata(pdev, sdma);
2119
2120	ret = dma_async_device_register(&sdma->dma_device);
2121	if (ret) {
2122		dev_err(&pdev->dev, "unable to register\n");
2123		goto err_init;
2124	}
2125
2126	if (np) {
2127		ret = of_dma_controller_register(np, sdma_xlate, sdma);
2128		if (ret) {
2129			dev_err(&pdev->dev, "failed to register controller\n");
2130			goto err_register;
2131		}
2132
2133		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
2134		ret = of_address_to_resource(spba_bus, 0, &spba_res);
2135		if (!ret) {
2136			sdma->spba_start_addr = spba_res.start;
2137			sdma->spba_end_addr = spba_res.end;
2138		}
2139		of_node_put(spba_bus);
2140	}
2141
2142	/*
2143	 * Kick off firmware loading as the very last step:
2144	 * attempt to load firmware only if we're not on the error path, because
2145	 * the firmware callback requires a fully functional and allocated sdma
2146	 * instance.
2147	 */
2148	if (pdata) {
2149		ret = sdma_get_firmware(sdma, pdata->fw_name);
2150		if (ret)
2151			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
2152	} else {
2153		/*
2154		 * Because the device tree does not encode the ROM script
2155		 * addresses, the RAM script in external firmware is mandatory
2156		 * for a device tree probe; without it the probe fails.
2157		 */
2158		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2159					      &fw_name);
2160		if (ret) {
2161			dev_warn(&pdev->dev, "failed to get firmware name\n");
2162		} else {
2163			ret = sdma_get_firmware(sdma, fw_name);
2164			if (ret)
2165				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2166		}
2167	}
2168
2169	return 0;
2170
2171err_register:
2172	dma_async_device_unregister(&sdma->dma_device);
2173err_init:
2174	kfree(sdma->script_addrs);
2175err_irq:
2176	clk_unprepare(sdma->clk_ahb);
2177err_clk:
2178	clk_unprepare(sdma->clk_ipg);
2179	return ret;
2180}
2181
2182static int sdma_remove(struct platform_device *pdev)
2183{
2184	struct sdma_engine *sdma = platform_get_drvdata(pdev);
2185	int i;
2186
2187	devm_free_irq(&pdev->dev, sdma->irq, sdma);
2188	dma_async_device_unregister(&sdma->dma_device);
2189	kfree(sdma->script_addrs);
2190	clk_unprepare(sdma->clk_ahb);
2191	clk_unprepare(sdma->clk_ipg);
2192	/* Kill the tasklet */
2193	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2194		struct sdma_channel *sdmac = &sdma->channel[i];
2195
2196		tasklet_kill(&sdmac->vc.task);
2197		sdma_free_chan_resources(&sdmac->vc.chan);
2198	}
2199
2200	platform_set_drvdata(pdev, NULL);
2201	return 0;
2202}
2203
2204static struct platform_driver sdma_driver = {
2205	.driver		= {
2206		.name	= "imx-sdma",
2207		.of_match_table = sdma_dt_ids,
2208	},
2209	.id_table	= sdma_devtypes,
2210	.remove		= sdma_remove,
2211	.probe		= sdma_probe,
2212};
2213
2214module_platform_driver(sdma_driver);
2215
2216MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
2217MODULE_DESCRIPTION("i.MX SDMA driver");
2218#if IS_ENABLED(CONFIG_SOC_IMX6Q)
2219MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
2220#endif
2221#if IS_ENABLED(CONFIG_SOC_IMX7D)
2222MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
2223#endif
2224MODULE_LICENSE("GPL");
v4.10.11
   1/*
   2 * drivers/dma/imx-sdma.c
   3 *
   4 * This file contains a driver for the Freescale Smart DMA engine
   5 *
   6 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
   7 *
   8 * Based on code from Freescale:
   9 *
  10 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
  11 *
  12 * The code contained herein is licensed under the GNU General Public
  13 * License. You may obtain a copy of the GNU General Public License
  14 * Version 2 or later at the following locations:
  15 *
  16 * http://www.opensource.org/licenses/gpl-license.html
  17 * http://www.gnu.org/copyleft/gpl.html
  18 */
  19
  20#include <linux/init.h>
  21#include <linux/iopoll.h>
  22#include <linux/module.h>
  23#include <linux/types.h>
  24#include <linux/bitops.h>
  25#include <linux/mm.h>
  26#include <linux/interrupt.h>
  27#include <linux/clk.h>
  28#include <linux/delay.h>
  29#include <linux/sched.h>
  30#include <linux/semaphore.h>
  31#include <linux/spinlock.h>
  32#include <linux/device.h>
  33#include <linux/dma-mapping.h>
  34#include <linux/firmware.h>
  35#include <linux/slab.h>
  36#include <linux/platform_device.h>
  37#include <linux/dmaengine.h>
  38#include <linux/of.h>
  39#include <linux/of_address.h>
  40#include <linux/of_device.h>
  41#include <linux/of_dma.h>
 
  42
  43#include <asm/irq.h>
  44#include <linux/platform_data/dma-imx-sdma.h>
  45#include <linux/platform_data/dma-imx.h>
  46#include <linux/regmap.h>
  47#include <linux/mfd/syscon.h>
  48#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  49
  50#include "dmaengine.h"
 
  51
  52/* SDMA registers */
  53#define SDMA_H_C0PTR		0x000
  54#define SDMA_H_INTR		0x004
  55#define SDMA_H_STATSTOP		0x008
  56#define SDMA_H_START		0x00c
  57#define SDMA_H_EVTOVR		0x010
  58#define SDMA_H_DSPOVR		0x014
  59#define SDMA_H_HOSTOVR		0x018
  60#define SDMA_H_EVTPEND		0x01c
  61#define SDMA_H_DSPENBL		0x020
  62#define SDMA_H_RESET		0x024
  63#define SDMA_H_EVTERR		0x028
  64#define SDMA_H_INTRMSK		0x02c
  65#define SDMA_H_PSW		0x030
  66#define SDMA_H_EVTERRDBG	0x034
  67#define SDMA_H_CONFIG		0x038
  68#define SDMA_ONCE_ENB		0x040
  69#define SDMA_ONCE_DATA		0x044
  70#define SDMA_ONCE_INSTR		0x048
  71#define SDMA_ONCE_STAT		0x04c
  72#define SDMA_ONCE_CMD		0x050
  73#define SDMA_EVT_MIRROR		0x054
  74#define SDMA_ILLINSTADDR	0x058
  75#define SDMA_CHN0ADDR		0x05c
  76#define SDMA_ONCE_RTB		0x060
  77#define SDMA_XTRIG_CONF1	0x070
  78#define SDMA_XTRIG_CONF2	0x074
  79#define SDMA_CHNENBL0_IMX35	0x200
  80#define SDMA_CHNENBL0_IMX31	0x080
  81#define SDMA_CHNPRI_0		0x100
  82
  83/*
  84 * Buffer descriptor status values.
  85 */
  86#define BD_DONE  0x01
  87#define BD_WRAP  0x02
  88#define BD_CONT  0x04
  89#define BD_INTR  0x08
  90#define BD_RROR  0x10
  91#define BD_LAST  0x20
  92#define BD_EXTD  0x80
  93
  94/*
  95 * Data Node descriptor status values.
  96 */
  97#define DND_END_OF_FRAME  0x80
  98#define DND_END_OF_XFER   0x40
  99#define DND_DONE          0x20
 100#define DND_UNUSED        0x01
 101
 102/*
 103 * IPCV2 descriptor status values.
 104 */
 105#define BD_IPCV2_END_OF_FRAME  0x40
 106
 107#define IPCV2_MAX_NODES        50
 108/*
 109 * Error bit set in the CCB status field by the SDMA,
 110 * in setbd routine, in case of a transfer error
 111 */
 112#define DATA_ERROR  0x10000000
 113
 114/*
 115 * Buffer descriptor commands.
 116 */
 117#define C0_ADDR             0x01
 118#define C0_LOAD             0x02
 119#define C0_DUMP             0x03
 120#define C0_SETCTX           0x07
 121#define C0_GETCTX           0x03
 122#define C0_SETDM            0x01
 123#define C0_SETPM            0x04
 124#define C0_GETDM            0x02
 125#define C0_GETPM            0x08
 126/*
 127 * Change endianness indicator in the BD command field
 128 */
 129#define CHANGE_ENDIANNESS   0x80
 130
 131/*
 132 *  p_2_p watermark_level description
 133 *	Bits		Name			Description
 134 *	0-7		Lower WML		Lower watermark level
 135 *	8		PS			1: Pad Swallowing
 136 *						0: No Pad Swallowing
 137 *	9		PA			1: Pad Adding
 138 *						0: No Pad Adding
 139 *	10		SPDIF			If this bit is set both source
 140 *						and destination are on SPBA
 141 *	11		Source Bit(SP)		1: Source on SPBA
 142 *						0: Source on AIPS
 143 *	12		Destination Bit(DP)	1: Destination on SPBA
 144 *						0: Destination on AIPS
 145 *	13-15		---------		MUST BE 0
 146 *	16-23		Higher WML		HWML
 147 *	24-27		N			Total number of samples after
 148 *						which Pad adding/Swallowing
 149 *						must be done. It must be odd.
 150 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 151 *						LWML event mask
 152 *						0: LWE in EVENTS register
 153 *						1: LWE in EVENTS2 register
 154 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 155 *						HWML event mask
 156 *						0: HWE in EVENTS register
 157 *						1: HWE in EVENTS2 register
 158 *	30		---------		MUST BE 0
 159 *	31		CONT			1: Amount of samples to be
 160 *						transferred is unknown and
 161 *						script will keep on
 162 *						transferring samples as long as
 163 *						both events are detected and
 164 *						script must be manually stopped
 165 *						by the application
 166 *						0: The amount of samples to be
 167 *						transferred is equal to the
 168 *						count field of mode word
 169 */
 170#define SDMA_WATERMARK_LEVEL_LWML	0xFF
 171#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
 172#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
 173#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
 174#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
 175#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
 176#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
 177#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
 178#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
 179#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
 180
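/*
 * Editor's illustrative sketch (not part of the driver): packing a
 * peripheral-to-peripheral watermark_level word according to the bit layout
 * documented in the comment above. The burst values and SPBA flags passed in
 * main() are made-up example inputs.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_p2p_watermark(uint32_t lwml, uint32_t hwml,
				   int src_on_spba, int dst_on_spba)
{
	uint32_t wml = 0;

	wml |= lwml & 0xFF;		/* bits 0-7:  lower watermark level */
	wml |= (hwml & 0xFF) << 16;	/* bits 16-23: higher watermark level */
	if (src_on_spba)
		wml |= 1u << 11;	/* SP: source on SPBA */
	if (dst_on_spba)
		wml |= 1u << 12;	/* DP: destination on SPBA */
	wml |= 1u << 31;		/* CONT: keep transferring until stopped */

	return wml;
}

int main(void)
{
	printf("watermark_level = 0x%08x\n", pack_p2p_watermark(4, 8, 1, 1));
	return 0;
}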
 181/*
 182 * Mode/Count of data node descriptors - IPCv2
 183 */
 184struct sdma_mode_count {
 
 185	u32 count   : 16; /* size of the buffer pointed by this BD */
 186	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
 187	u32 command :  8; /* command mostly used for channel 0 */
 188};
 189
 190/*
 191 * Buffer descriptor
 192 */
 193struct sdma_buffer_descriptor {
 194	struct sdma_mode_count  mode;
 195	u32 buffer_addr;	/* address of the buffer described */
 196	u32 ext_buffer_addr;	/* extended buffer address */
 197} __attribute__ ((packed));
 198
 199/**
 200 * struct sdma_channel_control - Channel control Block
 201 *
 202 * @current_bd_ptr	current buffer descriptor processed
 203 * @base_bd_ptr		first element of buffer descriptor array
 204 * @unused		padding. The SDMA engine expects an array of 128 byte
 205 *			control blocks
 206 */
 207struct sdma_channel_control {
 208	u32 current_bd_ptr;
 209	u32 base_bd_ptr;
 210	u32 unused[2];
 211} __attribute__ ((packed));
 212
 213/**
 214 * struct sdma_state_registers - SDMA context for a channel
 215 *
 216 * @pc:		program counter
 
 217 * @t:		test bit: status of arithmetic & test instruction
 218 * @rpc:	return program counter
 
 219 * @sf:		source fault while loading data
 220 * @spc:	loop start program counter
 
 221 * @df:		destination fault while storing data
 222 * @epc:	loop end program counter
 223 * @lm:		loop mode
 224 */
 225struct sdma_state_registers {
 226	u32 pc     :14;
 227	u32 unused1: 1;
 228	u32 t      : 1;
 229	u32 rpc    :14;
 230	u32 unused0: 1;
 231	u32 sf     : 1;
 232	u32 spc    :14;
 233	u32 unused2: 1;
 234	u32 df     : 1;
 235	u32 epc    :14;
 236	u32 lm     : 2;
 237} __attribute__ ((packed));
 238
 239/**
 240 * struct sdma_context_data - sdma context specific to a channel
 241 *
 242 * @channel_state:	channel state bits
 243 * @gReg:		general registers
 244 * @mda:		burst dma destination address register
 245 * @msa:		burst dma source address register
 246 * @ms:			burst dma status register
 247 * @md:			burst dma data register
 248 * @pda:		peripheral dma destination address register
 249 * @psa:		peripheral dma source address register
 250 * @ps:			peripheral dma status register
 251 * @pd:			peripheral dma data register
 252 * @ca:			CRC polynomial register
 253 * @cs:			CRC accumulator register
 254 * @dda:		dedicated core destination address register
 255 * @dsa:		dedicated core source address register
 256 * @ds:			dedicated core status register
 257 * @dd:			dedicated core data register
 258 */
 259struct sdma_context_data {
 260	struct sdma_state_registers  channel_state;
 261	u32  gReg[8];
 262	u32  mda;
 263	u32  msa;
 264	u32  ms;
 265	u32  md;
 266	u32  pda;
 267	u32  psa;
 268	u32  ps;
 269	u32  pd;
 270	u32  ca;
 271	u32  cs;
 272	u32  dda;
 273	u32  dsa;
 274	u32  ds;
 275	u32  dd;
 276	u32  scratch0;
 277	u32  scratch1;
 278	u32  scratch2;
 279	u32  scratch3;
 280	u32  scratch4;
 281	u32  scratch5;
 282	u32  scratch6;
 283	u32  scratch7;
 284} __attribute__ ((packed));
 285
 286#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
 287
 288struct sdma_engine;
 289
 290/**
 291 * struct sdma_channel - housekeeping for a SDMA channel
 292 *
 293 * @sdma		pointer to the SDMA engine for this channel
 294 * @channel		the channel number, matches dmaengine chan_id + 1
 295 * @direction		transfer type. Needed for setting SDMA script
 296 * @peripheral_type	Peripheral type. Needed for setting SDMA script
 297 * @event_id0		aka dma request line
 298 * @event_id1		for channels that use 2 events
 299 * @word_size		peripheral access size
 300 * @buf_tail		ID of the buffer that was processed
 301 * @buf_ptail		ID of the previous buffer that was processed
  302 * @num_bd		number of descriptors currently in use (at most NUM_BD)
 303 */
 304struct sdma_channel {
 
 
 305	struct sdma_engine		*sdma;
 306	unsigned int			channel;
 307	enum dma_transfer_direction		direction;
 
 308	enum sdma_peripheral_type	peripheral_type;
 309	unsigned int			event_id0;
 310	unsigned int			event_id1;
 311	enum dma_slave_buswidth		word_size;
 312	unsigned int			buf_tail;
 313	unsigned int			buf_ptail;
 314	unsigned int			num_bd;
 315	unsigned int			period_len;
 316	struct sdma_buffer_descriptor	*bd;
 317	dma_addr_t			bd_phys;
 318	unsigned int			pc_from_device, pc_to_device;
 319	unsigned int			device_to_device;
 
 320	unsigned long			flags;
 321	dma_addr_t			per_address, per_address2;
 322	unsigned long			event_mask[2];
 323	unsigned long			watermark_level;
 324	u32				shp_addr, per_addr;
 325	struct dma_chan			chan;
 326	spinlock_t			lock;
 327	struct dma_async_tx_descriptor	desc;
 328	enum dma_status			status;
 329	unsigned int			chn_count;
 330	unsigned int			chn_real_count;
 331	struct tasklet_struct		tasklet;
 332	struct imx_dma_data		data;
 
 333};
 334
 335#define IMX_DMA_SG_LOOP		BIT(0)
 336
 337#define MAX_DMA_CHANNELS 32
 338#define MXC_SDMA_DEFAULT_PRIORITY 1
 339#define MXC_SDMA_MIN_PRIORITY 1
 340#define MXC_SDMA_MAX_PRIORITY 7
 341
 342#define SDMA_FIRMWARE_MAGIC 0x414d4453
 343
 344/**
 345 * struct sdma_firmware_header - Layout of the firmware image
 346 *
 347 * @magic		"SDMA"
 348 * @version_major	increased whenever layout of struct sdma_script_start_addrs
 349 *			changes.
 350 * @version_minor	firmware minor version (for binary compatible changes)
 351 * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
 352 * @num_script_addrs	Number of script addresses in this image
 353 * @ram_code_start	offset of SDMA ram image in this firmware image
 354 * @ram_code_size	size of SDMA ram image
 355 * @script_addrs	Stores the start address of the SDMA scripts
 356 *			(in SDMA memory space)
 357 */
 358struct sdma_firmware_header {
 359	u32	magic;
 360	u32	version_major;
 361	u32	version_minor;
 362	u32	script_addrs_start;
 363	u32	num_script_addrs;
 364	u32	ram_code_start;
 365	u32	ram_code_size;
 366};
 367
 368struct sdma_driver_data {
 369	int chnenbl0;
 370	int num_events;
 371	struct sdma_script_start_addrs	*script_addrs;
 
 372};
 373
 374struct sdma_engine {
 375	struct device			*dev;
 376	struct device_dma_parameters	dma_parms;
 377	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 378	struct sdma_channel_control	*channel_control;
 379	void __iomem			*regs;
 380	struct sdma_context_data	*context;
 381	dma_addr_t			context_phys;
 382	struct dma_device		dma_device;
 383	struct clk			*clk_ipg;
 384	struct clk			*clk_ahb;
 385	spinlock_t			channel_0_lock;
 386	u32				script_number;
 387	struct sdma_script_start_addrs	*script_addrs;
 388	const struct sdma_driver_data	*drvdata;
 389	u32				spba_start_addr;
 390	u32				spba_end_addr;
 391	unsigned int			irq;
 
 
 
 
 392};
 393
 
 
 
 
 394static struct sdma_driver_data sdma_imx31 = {
 395	.chnenbl0 = SDMA_CHNENBL0_IMX31,
 396	.num_events = 32,
 397};
 398
 399static struct sdma_script_start_addrs sdma_script_imx25 = {
 400	.ap_2_ap_addr = 729,
 401	.uart_2_mcu_addr = 904,
 402	.per_2_app_addr = 1255,
 403	.mcu_2_app_addr = 834,
 404	.uartsh_2_mcu_addr = 1120,
 405	.per_2_shp_addr = 1329,
 406	.mcu_2_shp_addr = 1048,
 407	.ata_2_mcu_addr = 1560,
 408	.mcu_2_ata_addr = 1479,
 409	.app_2_per_addr = 1189,
 410	.app_2_mcu_addr = 770,
 411	.shp_2_per_addr = 1407,
 412	.shp_2_mcu_addr = 979,
 413};
 414
 415static struct sdma_driver_data sdma_imx25 = {
 416	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 417	.num_events = 48,
 418	.script_addrs = &sdma_script_imx25,
 419};
 420
 421static struct sdma_driver_data sdma_imx35 = {
 422	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 423	.num_events = 48,
 424};
 425
 426static struct sdma_script_start_addrs sdma_script_imx51 = {
 427	.ap_2_ap_addr = 642,
 428	.uart_2_mcu_addr = 817,
 429	.mcu_2_app_addr = 747,
 430	.mcu_2_shp_addr = 961,
 431	.ata_2_mcu_addr = 1473,
 432	.mcu_2_ata_addr = 1392,
 433	.app_2_per_addr = 1033,
 434	.app_2_mcu_addr = 683,
 435	.shp_2_per_addr = 1251,
 436	.shp_2_mcu_addr = 892,
 437};
 438
 439static struct sdma_driver_data sdma_imx51 = {
 440	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 441	.num_events = 48,
 442	.script_addrs = &sdma_script_imx51,
 443};
 444
 445static struct sdma_script_start_addrs sdma_script_imx53 = {
 446	.ap_2_ap_addr = 642,
 447	.app_2_mcu_addr = 683,
 448	.mcu_2_app_addr = 747,
 449	.uart_2_mcu_addr = 817,
 450	.shp_2_mcu_addr = 891,
 451	.mcu_2_shp_addr = 960,
 452	.uartsh_2_mcu_addr = 1032,
 453	.spdif_2_mcu_addr = 1100,
 454	.mcu_2_spdif_addr = 1134,
 455	.firi_2_mcu_addr = 1193,
 456	.mcu_2_firi_addr = 1290,
 457};
 458
 459static struct sdma_driver_data sdma_imx53 = {
 460	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 461	.num_events = 48,
 462	.script_addrs = &sdma_script_imx53,
 463};
 464
 465static struct sdma_script_start_addrs sdma_script_imx6q = {
 466	.ap_2_ap_addr = 642,
 467	.uart_2_mcu_addr = 817,
 468	.mcu_2_app_addr = 747,
 469	.per_2_per_addr = 6331,
 470	.uartsh_2_mcu_addr = 1032,
 471	.mcu_2_shp_addr = 960,
 472	.app_2_mcu_addr = 683,
 473	.shp_2_mcu_addr = 891,
 474	.spdif_2_mcu_addr = 1100,
 475	.mcu_2_spdif_addr = 1134,
 476};
 477
 478static struct sdma_driver_data sdma_imx6q = {
 479	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 480	.num_events = 48,
 481	.script_addrs = &sdma_script_imx6q,
 482};
 483
 484static struct sdma_script_start_addrs sdma_script_imx7d = {
 485	.ap_2_ap_addr = 644,
 486	.uart_2_mcu_addr = 819,
 487	.mcu_2_app_addr = 749,
 488	.uartsh_2_mcu_addr = 1034,
 489	.mcu_2_shp_addr = 962,
 490	.app_2_mcu_addr = 685,
 491	.shp_2_mcu_addr = 893,
 492	.spdif_2_mcu_addr = 1102,
 493	.mcu_2_spdif_addr = 1136,
 494};
 495
 496static struct sdma_driver_data sdma_imx7d = {
 497	.chnenbl0 = SDMA_CHNENBL0_IMX35,
 498	.num_events = 48,
 499	.script_addrs = &sdma_script_imx7d,
 500};
 501
 502static const struct platform_device_id sdma_devtypes[] = {
 503	{
 504		.name = "imx25-sdma",
 505		.driver_data = (unsigned long)&sdma_imx25,
 506	}, {
 507		.name = "imx31-sdma",
 508		.driver_data = (unsigned long)&sdma_imx31,
 509	}, {
 510		.name = "imx35-sdma",
 511		.driver_data = (unsigned long)&sdma_imx35,
 512	}, {
 513		.name = "imx51-sdma",
 514		.driver_data = (unsigned long)&sdma_imx51,
 515	}, {
 516		.name = "imx53-sdma",
 517		.driver_data = (unsigned long)&sdma_imx53,
 518	}, {
 519		.name = "imx6q-sdma",
 520		.driver_data = (unsigned long)&sdma_imx6q,
 521	}, {
 522		.name = "imx7d-sdma",
 523		.driver_data = (unsigned long)&sdma_imx7d,
 524	}, {
 
 
 
 525		/* sentinel */
 526	}
 527};
 528MODULE_DEVICE_TABLE(platform, sdma_devtypes);
 529
 530static const struct of_device_id sdma_dt_ids[] = {
 531	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
 532	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
 533	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
 534	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
 535	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 536	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 537	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
 
 538	{ /* sentinel */ }
 539};
 540MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 541
 542#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
 543#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
 544#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
  545#define SDMA_H_CONFIG_CSM	(3)       /* indicates which context switch mode is selected */
 546
 547static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
 548{
 549	u32 chnenbl0 = sdma->drvdata->chnenbl0;
 550	return chnenbl0 + event * 4;
 551}
 552
 553static int sdma_config_ownership(struct sdma_channel *sdmac,
 554		bool event_override, bool mcu_override, bool dsp_override)
 555{
 556	struct sdma_engine *sdma = sdmac->sdma;
 557	int channel = sdmac->channel;
 558	unsigned long evt, mcu, dsp;
 559
 560	if (event_override && mcu_override && dsp_override)
 561		return -EINVAL;
 562
 563	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
 564	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
 565	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
 566
 567	if (dsp_override)
 568		__clear_bit(channel, &dsp);
 569	else
 570		__set_bit(channel, &dsp);
 571
 572	if (event_override)
 573		__clear_bit(channel, &evt);
 574	else
 575		__set_bit(channel, &evt);
 576
 577	if (mcu_override)
 578		__clear_bit(channel, &mcu);
 579	else
 580		__set_bit(channel, &mcu);
 581
 582	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
 583	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
 584	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
 585
 586	return 0;
 587}
 588
 589static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 590{
 591	writel(BIT(channel), sdma->regs + SDMA_H_START);
 592}
 593
 594/*
 595 * sdma_run_channel0 - run a channel and wait till it's done
 596 */
 597static int sdma_run_channel0(struct sdma_engine *sdma)
 598{
 599	int ret;
 600	u32 reg;
 601
 602	sdma_enable_channel(sdma, 0);
 603
 604	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
 605						reg, !(reg & 1), 1, 500);
 606	if (ret)
 607		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
 608
 609	/* Set bits of CONFIG register with dynamic context switching */
 610	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
 611		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
 
 
 
 612
 613	return ret;
 614}
 615
 616static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 617		u32 address)
 618{
 619	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 620	void *buf_virt;
 621	dma_addr_t buf_phys;
 622	int ret;
 623	unsigned long flags;
 624
 625	buf_virt = dma_alloc_coherent(NULL,
 626			size,
 627			&buf_phys, GFP_KERNEL);
 628	if (!buf_virt) {
 629		return -ENOMEM;
 630	}
 631
 632	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 633
 634	bd0->mode.command = C0_SETPM;
 635	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 636	bd0->mode.count = size / 2;
 637	bd0->buffer_addr = buf_phys;
 638	bd0->ext_buffer_addr = address;
 639
 640	memcpy(buf_virt, buf, size);
 641
 642	ret = sdma_run_channel0(sdma);
 643
 644	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 645
 646	dma_free_coherent(NULL, size, buf_virt, buf_phys);
 647
 648	return ret;
 649}
 650
 651static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 652{
 653	struct sdma_engine *sdma = sdmac->sdma;
 654	int channel = sdmac->channel;
 655	unsigned long val;
 656	u32 chnenbl = chnenbl_ofs(sdma, event);
 657
 658	val = readl_relaxed(sdma->regs + chnenbl);
 659	__set_bit(channel, &val);
 660	writel_relaxed(val, sdma->regs + chnenbl);
 661}
 662
 663static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 664{
 665	struct sdma_engine *sdma = sdmac->sdma;
 666	int channel = sdmac->channel;
 667	u32 chnenbl = chnenbl_ofs(sdma, event);
 668	unsigned long val;
 669
 670	val = readl_relaxed(sdma->regs + chnenbl);
 671	__clear_bit(channel, &val);
 672	writel_relaxed(val, sdma->regs + chnenbl);
 673}
 674
 675static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 676{
 677	struct sdma_buffer_descriptor *bd;
 678	int error = 0;
 679	enum dma_status	old_status = sdmac->status;
 680
 681	/*
  682	 * Loop mode: iterate over the descriptors, re-initialize them and
  683	 * call the callback function.
 684	 */
 685	while (1) {
 686		bd = &sdmac->bd[sdmac->buf_tail];
 
 
 687
 688		if (bd->mode.status & BD_DONE)
 689			break;
 690
 691		if (bd->mode.status & BD_RROR) {
 692			bd->mode.status &= ~BD_RROR;
 693			sdmac->status = DMA_ERROR;
 694			error = -EIO;
 695		}
 696
 697	       /*
  698		* We use bd->mode.count to calculate the residue, since it contains
 699		* the number of bytes present in the current buffer descriptor.
 700		*/
 701
 702		sdmac->chn_real_count = bd->mode.count;
 703		bd->mode.status |= BD_DONE;
 704		bd->mode.count = sdmac->period_len;
 705		sdmac->buf_ptail = sdmac->buf_tail;
 706		sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
 707
 708		/*
 709		 * The callback is called from the interrupt context in order
 710		 * to reduce latency and to avoid the risk of altering the
 711		 * SDMA transaction status by the time the client tasklet is
 712		 * executed.
 713		 */
 714
 715		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
 
 716
 717		if (error)
 718			sdmac->status = old_status;
 719	}
 720}
 721
 722static void mxc_sdma_handle_channel_normal(unsigned long data)
 723{
 724	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 725	struct sdma_buffer_descriptor *bd;
 726	int i, error = 0;
 727
 728	sdmac->chn_real_count = 0;
 729	/*
  730	 * Non-loop mode: iterate over all descriptors, collect
  731	 * errors and call the callback function.
 732	 */
 733	for (i = 0; i < sdmac->num_bd; i++) {
 734		bd = &sdmac->bd[i];
 735
 736		 if (bd->mode.status & (BD_DONE | BD_RROR))
 737			error = -EIO;
 738		 sdmac->chn_real_count += bd->mode.count;
 739	}
 740
 741	if (error)
 742		sdmac->status = DMA_ERROR;
 743	else
 744		sdmac->status = DMA_COMPLETE;
 745
 746	dma_cookie_complete(&sdmac->desc);
 747
 748	dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
 749}
 750
 751static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 752{
 753	struct sdma_engine *sdma = dev_id;
 754	unsigned long stat;
 755
 756	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
 757	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 758	/* channel 0 is special and not handled here, see run_channel0() */
 759	stat &= ~1;
 760
 761	while (stat) {
 762		int channel = fls(stat) - 1;
 763		struct sdma_channel *sdmac = &sdma->channel[channel];
 
 764
 765		if (sdmac->flags & IMX_DMA_SG_LOOP)
 766			sdma_update_channel_loop(sdmac);
 767		else
 768			tasklet_schedule(&sdmac->tasklet);
 769
 
 770		__clear_bit(channel, &stat);
 771	}
 772
 773	return IRQ_HANDLED;
 774}
 775
 776/*
 777 * sets the pc of SDMA script according to the peripheral type
 778 */
 779static void sdma_get_pc(struct sdma_channel *sdmac,
 780		enum sdma_peripheral_type peripheral_type)
 781{
 782	struct sdma_engine *sdma = sdmac->sdma;
 783	int per_2_emi = 0, emi_2_per = 0;
 784	/*
 785	 * These are needed once we start to support transfers between
 786	 * two peripherals or memory-to-memory transfers
 787	 */
 788	int per_2_per = 0;
 789
 790	sdmac->pc_from_device = 0;
 791	sdmac->pc_to_device = 0;
 792	sdmac->device_to_device = 0;
 
 793
 794	switch (peripheral_type) {
 795	case IMX_DMATYPE_MEMORY:
 
 796		break;
 797	case IMX_DMATYPE_DSP:
 798		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
 799		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
 800		break;
 801	case IMX_DMATYPE_FIRI:
 802		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
 803		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
 804		break;
 805	case IMX_DMATYPE_UART:
 806		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
 807		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 808		break;
 809	case IMX_DMATYPE_UART_SP:
 810		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
 811		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 812		break;
 813	case IMX_DMATYPE_ATA:
 814		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
 815		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
 816		break;
 817	case IMX_DMATYPE_CSPI:
 818	case IMX_DMATYPE_EXT:
 819	case IMX_DMATYPE_SSI:
 820	case IMX_DMATYPE_SAI:
 821		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
 822		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 823		break;
 824	case IMX_DMATYPE_SSI_DUAL:
 825		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
 826		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
 827		break;
 828	case IMX_DMATYPE_SSI_SP:
 829	case IMX_DMATYPE_MMC:
 830	case IMX_DMATYPE_SDHC:
 831	case IMX_DMATYPE_CSPI_SP:
 832	case IMX_DMATYPE_ESAI:
 833	case IMX_DMATYPE_MSHC_SP:
 834		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
 835		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 836		break;
 837	case IMX_DMATYPE_ASRC:
 838		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
 839		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
 840		per_2_per = sdma->script_addrs->per_2_per_addr;
 841		break;
 842	case IMX_DMATYPE_ASRC_SP:
 843		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
 844		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
 845		per_2_per = sdma->script_addrs->per_2_per_addr;
 846		break;
 847	case IMX_DMATYPE_MSHC:
 848		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
 849		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
 850		break;
 851	case IMX_DMATYPE_CCM:
 852		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
 853		break;
 854	case IMX_DMATYPE_SPDIF:
 855		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
 856		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
 857		break;
 858	case IMX_DMATYPE_IPU_MEMORY:
 859		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
 860		break;
 861	default:
 862		break;
 863	}
 864
 865	sdmac->pc_from_device = per_2_emi;
 866	sdmac->pc_to_device = emi_2_per;
 867	sdmac->device_to_device = per_2_per;
 
 868}
 869
 870static int sdma_load_context(struct sdma_channel *sdmac)
 871{
 872	struct sdma_engine *sdma = sdmac->sdma;
 873	int channel = sdmac->channel;
 874	int load_address;
 875	struct sdma_context_data *context = sdma->context;
 876	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
 877	int ret;
 878	unsigned long flags;
 879
 
 
 
 880	if (sdmac->direction == DMA_DEV_TO_MEM)
 881		load_address = sdmac->pc_from_device;
 882	else if (sdmac->direction == DMA_DEV_TO_DEV)
 883		load_address = sdmac->device_to_device;
 
 
 884	else
 885		load_address = sdmac->pc_to_device;
 886
 887	if (load_address < 0)
 888		return load_address;
 889
 890	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
 891	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
 892	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
 893	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
 894	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
 895	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 896
 897	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 898
 899	memset(context, 0, sizeof(*context));
 900	context->channel_state.pc = load_address;
 901
  902	/* Pass the event masks, the peripheral base address and the
  903	 * watermark level to the script through the context
 904	 */
 905	context->gReg[0] = sdmac->event_mask[1];
 906	context->gReg[1] = sdmac->event_mask[0];
 907	context->gReg[2] = sdmac->per_addr;
 908	context->gReg[6] = sdmac->shp_addr;
 909	context->gReg[7] = sdmac->watermark_level;
 910
 911	bd0->mode.command = C0_SETDM;
 912	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
 913	bd0->mode.count = sizeof(*context) / 4;
 914	bd0->buffer_addr = sdma->context_phys;
 915	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
 916	ret = sdma_run_channel0(sdma);
 917
 918	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 919
 
 
 920	return ret;
 921}
 922
 923static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 924{
 925	return container_of(chan, struct sdma_channel, chan);
 926}
 927
 928static int sdma_disable_channel(struct dma_chan *chan)
 929{
 930	struct sdma_channel *sdmac = to_sdma_chan(chan);
 931	struct sdma_engine *sdma = sdmac->sdma;
 932	int channel = sdmac->channel;
 933
 934	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 935	sdmac->status = DMA_ERROR;
 936
 937	return 0;
 938}
 939
 940static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 941{
 942	struct sdma_engine *sdma = sdmac->sdma;
 943
 944	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
 945	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
 946
 947	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
 948	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
 949
 950	if (sdmac->event_id0 > 31)
 951		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
 952
 953	if (sdmac->event_id1 > 31)
 954		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
 955
 956	/*
  957	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to swap
  958	 * LWML and HWML in INFO (A.3.2.5.1), and also swap
 959	 * r0(event_mask[1]) and r1(event_mask[0]).
 960	 */
 961	if (lwml > hwml) {
 962		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
 963						SDMA_WATERMARK_LEVEL_HWML);
 964		sdmac->watermark_level |= hwml;
 965		sdmac->watermark_level |= lwml << 16;
 966		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
 967	}
 968
 969	if (sdmac->per_address2 >= sdma->spba_start_addr &&
 970			sdmac->per_address2 <= sdma->spba_end_addr)
 971		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
 972
 973	if (sdmac->per_address >= sdma->spba_start_addr &&
 974			sdmac->per_address <= sdma->spba_end_addr)
 975		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
 976
 977	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
 978}
 979
 980static int sdma_config_channel(struct dma_chan *chan)
 981{
 982	struct sdma_channel *sdmac = to_sdma_chan(chan);
 983	int ret;
 984
 985	sdma_disable_channel(chan);
 986
 987	sdmac->event_mask[0] = 0;
 988	sdmac->event_mask[1] = 0;
 989	sdmac->shp_addr = 0;
 990	sdmac->per_addr = 0;
 991
 992	if (sdmac->event_id0) {
 993		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
 994			return -EINVAL;
 995		sdma_event_enable(sdmac, sdmac->event_id0);
 996	}
 997
 998	if (sdmac->event_id1) {
 999		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1000			return -EINVAL;
1001		sdma_event_enable(sdmac, sdmac->event_id1);
1002	}
1003
1004	switch (sdmac->peripheral_type) {
1005	case IMX_DMATYPE_DSP:
1006		sdma_config_ownership(sdmac, false, true, true);
1007		break;
1008	case IMX_DMATYPE_MEMORY:
1009		sdma_config_ownership(sdmac, false, true, false);
1010		break;
1011	default:
1012		sdma_config_ownership(sdmac, true, true, false);
1013		break;
1014	}
1015
1016	sdma_get_pc(sdmac, sdmac->peripheral_type);
1017
1018	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1019			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1020		/* Handle multiple event channels differently */
1021		if (sdmac->event_id1) {
1022			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1023			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1024				sdma_set_watermarklevel_for_p2p(sdmac);
1025		} else
1026			__set_bit(sdmac->event_id0, sdmac->event_mask);
1027
1028		/* Address */
1029		sdmac->shp_addr = sdmac->per_address;
1030		sdmac->per_addr = sdmac->per_address2;
1031	} else {
1032		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
1033	}
1034
1035	ret = sdma_load_context(sdmac);
1036
1037	return ret;
1038}
1039
1040static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1041		unsigned int priority)
1042{
1043	struct sdma_engine *sdma = sdmac->sdma;
1044	int channel = sdmac->channel;
1045
1046	if (priority < MXC_SDMA_MIN_PRIORITY
1047	    || priority > MXC_SDMA_MAX_PRIORITY) {
1048		return -EINVAL;
1049	}
1050
1051	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1052
1053	return 0;
1054}
1055
1056static int sdma_request_channel(struct sdma_channel *sdmac)
1057{
1058	struct sdma_engine *sdma = sdmac->sdma;
1059	int channel = sdmac->channel;
1060	int ret = -EBUSY;
1061
1062	sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
1063					GFP_KERNEL);
1064	if (!sdmac->bd) {
1065		ret = -ENOMEM;
1066		goto out;
1067	}
1068
1069	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
1070	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1071
1072	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
1073	return 0;
1074out:
1075
1076	return ret;
1077}
1078
1079static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 
1080{
1081	unsigned long flags;
1082	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
1083	dma_cookie_t cookie;
1084
1085	spin_lock_irqsave(&sdmac->lock, flags);
 
 
1086
1087	cookie = dma_cookie_assign(tx);
 
 
1088
1089	spin_unlock_irqrestore(&sdmac->lock, flags);
 
 
1090
1091	return cookie;
 
1092}
1093
1094static int sdma_alloc_chan_resources(struct dma_chan *chan)
1095{
1096	struct sdma_channel *sdmac = to_sdma_chan(chan);
1097	struct imx_dma_data *data = chan->private;
 
1098	int prio, ret;
1099
1100	if (!data)
1101		return -EINVAL;
1102
1103	switch (data->priority) {
1104	case DMA_PRIO_HIGH:
1105		prio = 3;
1106		break;
1107	case DMA_PRIO_MEDIUM:
1108		prio = 2;
1109		break;
1110	case DMA_PRIO_LOW:
1111	default:
1112		prio = 1;
1113		break;
1114	}
1115
1116	sdmac->peripheral_type = data->peripheral_type;
1117	sdmac->event_id0 = data->dma_request;
1118	sdmac->event_id1 = data->dma_request2;
1119
1120	ret = clk_enable(sdmac->sdma->clk_ipg);
1121	if (ret)
1122		return ret;
1123	ret = clk_enable(sdmac->sdma->clk_ahb);
1124	if (ret)
1125		goto disable_clk_ipg;
1126
1127	ret = sdma_request_channel(sdmac);
1128	if (ret)
1129		goto disable_clk_ahb;
1130
1131	ret = sdma_set_channel_priority(sdmac, prio);
1132	if (ret)
1133		goto disable_clk_ahb;
1134
1135	dma_async_tx_descriptor_init(&sdmac->desc, chan);
1136	sdmac->desc.tx_submit = sdma_tx_submit;
1137	/* txd.flags will be overwritten in prep funcs */
1138	sdmac->desc.flags = DMA_CTRL_ACK;
1139
1140	return 0;
1141
1142disable_clk_ahb:
1143	clk_disable(sdmac->sdma->clk_ahb);
1144disable_clk_ipg:
1145	clk_disable(sdmac->sdma->clk_ipg);
1146	return ret;
1147}
1148
1149static void sdma_free_chan_resources(struct dma_chan *chan)
1150{
1151	struct sdma_channel *sdmac = to_sdma_chan(chan);
1152	struct sdma_engine *sdma = sdmac->sdma;
1153
1154	sdma_disable_channel(chan);
 
 
1155
1156	if (sdmac->event_id0)
1157		sdma_event_disable(sdmac, sdmac->event_id0);
1158	if (sdmac->event_id1)
1159		sdma_event_disable(sdmac, sdmac->event_id1);
1160
1161	sdmac->event_id0 = 0;
1162	sdmac->event_id1 = 0;
1163
1164	sdma_set_channel_priority(sdmac, 0);
1165
1166	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
1167
1168	clk_disable(sdma->clk_ipg);
1169	clk_disable(sdma->clk_ahb);
1170}
1171
1172static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1173		struct dma_chan *chan, struct scatterlist *sgl,
1174		unsigned int sg_len, enum dma_transfer_direction direction,
1175		unsigned long flags, void *context)
1176{
1177	struct sdma_channel *sdmac = to_sdma_chan(chan);
1178	struct sdma_engine *sdma = sdmac->sdma;
1179	int ret, i, count;
1180	int channel = sdmac->channel;
1181	struct scatterlist *sg;
 
1182
1183	if (sdmac->status == DMA_IN_PROGRESS)
1184		return NULL;
1185	sdmac->status = DMA_IN_PROGRESS;
1186
1187	sdmac->flags = 0;
1188
1189	sdmac->buf_tail = 0;
1190	sdmac->buf_ptail = 0;
1191	sdmac->chn_real_count = 0;
1192
1193	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1194			sg_len, channel);
1195
1196	sdmac->direction = direction;
1197	ret = sdma_load_context(sdmac);
1198	if (ret)
1199		goto err_out;
1200
1201	if (sg_len > NUM_BD) {
1202		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1203				channel, sg_len, NUM_BD);
1204		ret = -EINVAL;
1205		goto err_out;
1206	}
1207
1208	sdmac->chn_count = 0;
1209	for_each_sg(sgl, sg, sg_len, i) {
1210		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1211		int param;
1212
1213		bd->buffer_addr = sg->dma_address;
1214
1215		count = sg_dma_len(sg);
1216
1217		if (count > 0xffff) {
1218			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1219					channel, count, 0xffff);
1220			ret = -EINVAL;
1221			goto err_out;
1222		}
1223
1224		bd->mode.count = count;
1225		sdmac->chn_count += count;
1226
1227		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
1228			ret =  -EINVAL;
1229			goto err_out;
1230		}
1231
1232		switch (sdmac->word_size) {
1233		case DMA_SLAVE_BUSWIDTH_4_BYTES:
1234			bd->mode.command = 0;
1235			if (count & 3 || sg->dma_address & 3)
1236				return NULL;
1237			break;
1238		case DMA_SLAVE_BUSWIDTH_2_BYTES:
1239			bd->mode.command = 2;
1240			if (count & 1 || sg->dma_address & 1)
1241				return NULL;
1242			break;
1243		case DMA_SLAVE_BUSWIDTH_1_BYTE:
1244			bd->mode.command = 1;
1245			break;
1246		default:
1247			return NULL;
1248		}
1249
1250		param = BD_DONE | BD_EXTD | BD_CONT;
1251
1252		if (i + 1 == sg_len) {
1253			param |= BD_INTR;
1254			param |= BD_LAST;
1255			param &= ~BD_CONT;
1256		}
1257
1258		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1259				i, count, (u64)sg->dma_address,
1260				param & BD_WRAP ? "wrap" : "",
1261				param & BD_INTR ? " intr" : "");
1262
1263		bd->mode.status = param;
1264	}
1265
1266	sdmac->num_bd = sg_len;
1267	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1268
1269	return &sdmac->desc;
1270err_out:
1271	sdmac->status = DMA_ERROR;
1272	return NULL;
1273}
1274
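/*
 * Editor's illustrative sketch (not part of the driver): the usual dmaengine
 * client sequence that ends up in sdma_prep_slave_sg() above - prepare a
 * descriptor from an already DMA-mapped scatterlist, submit it and kick the
 * engine. The function name, callback and arguments are placeholders.
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int nents, void (*done)(void *), void *arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = done;		/* invoked from sdma's completion path */
	desc->callback_param = arg;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);	/* ends up in sdma_issue_pending() */
	return 0;
}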
1275static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1276		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1277		size_t period_len, enum dma_transfer_direction direction,
1278		unsigned long flags)
1279{
1280	struct sdma_channel *sdmac = to_sdma_chan(chan);
1281	struct sdma_engine *sdma = sdmac->sdma;
1282	int num_periods = buf_len / period_len;
1283	int channel = sdmac->channel;
1284	int ret, i = 0, buf = 0;
 
1285
1286	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1287
1288	if (sdmac->status == DMA_IN_PROGRESS)
1289		return NULL;
1290
1291	sdmac->status = DMA_IN_PROGRESS;
 
 
1292
1293	sdmac->buf_tail = 0;
1294	sdmac->buf_ptail = 0;
1295	sdmac->chn_real_count = 0;
1296	sdmac->period_len = period_len;
1297
1298	sdmac->flags |= IMX_DMA_SG_LOOP;
1299	sdmac->direction = direction;
1300	ret = sdma_load_context(sdmac);
1301	if (ret)
1302		goto err_out;
1303
1304	if (num_periods > NUM_BD) {
1305		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1306				channel, num_periods, NUM_BD);
1307		goto err_out;
1308	}
1309
1310	if (period_len > 0xffff) {
 1311		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1312				channel, period_len, 0xffff);
1313		goto err_out;
1314	}
1315
1316	while (buf < buf_len) {
1317		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1318		int param;
1319
1320		bd->buffer_addr = dma_addr;
1321
1322		bd->mode.count = period_len;
1323
1324		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1325			goto err_out;
1326		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1327			bd->mode.command = 0;
1328		else
1329			bd->mode.command = sdmac->word_size;
1330
1331		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1332		if (i + 1 == num_periods)
1333			param |= BD_WRAP;
1334
1335		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1336				i, period_len, (u64)dma_addr,
1337				param & BD_WRAP ? "wrap" : "",
1338				param & BD_INTR ? " intr" : "");
1339
1340		bd->mode.status = param;
1341
1342		dma_addr += period_len;
1343		buf += period_len;
1344
1345		i++;
1346	}
1347
1348	sdmac->num_bd = num_periods;
1349	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1350
1351	return &sdmac->desc;
1352err_out:
1353	sdmac->status = DMA_ERROR;
1354	return NULL;
1355}
1356
1357static int sdma_config(struct dma_chan *chan,
1358		       struct dma_slave_config *dmaengine_cfg)
 
1359{
1360	struct sdma_channel *sdmac = to_sdma_chan(chan);
1361
1362	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1363		sdmac->per_address = dmaengine_cfg->src_addr;
1364		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1365			dmaengine_cfg->src_addr_width;
1366		sdmac->word_size = dmaengine_cfg->src_addr_width;
1367	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
1368		sdmac->per_address2 = dmaengine_cfg->src_addr;
1369		sdmac->per_address = dmaengine_cfg->dst_addr;
1370		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1371			SDMA_WATERMARK_LEVEL_LWML;
1372		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1373			SDMA_WATERMARK_LEVEL_HWML;
1374		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1375	} else {
1376		sdmac->per_address = dmaengine_cfg->dst_addr;
1377		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1378			dmaengine_cfg->dst_addr_width;
1379		sdmac->word_size = dmaengine_cfg->dst_addr_width;
1380	}
1381	sdmac->direction = dmaengine_cfg->direction;
1382	return sdma_config_channel(chan);
1383}
1384
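/*
 * Editor's illustrative sketch (not part of the driver): what a client
 * driver's dma_slave_config looks like for a DEV_TO_MEM transfer handled by
 * sdma_config() above. With these values the driver derives
 * watermark_level = src_maxburst * src_addr_width = 4 * 4 = 16 bytes.
 * The FIFO address and function name are made-up examples.
 */
#include <linux/dmaengine.h>

static int example_configure_rx(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= 0x30a90000,	/* peripheral FIFO (example) */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};

	return dmaengine_slave_config(chan, &cfg);
}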
1385static enum dma_status sdma_tx_status(struct dma_chan *chan,
1386				      dma_cookie_t cookie,
1387				      struct dma_tx_state *txstate)
1388{
1389	struct sdma_channel *sdmac = to_sdma_chan(chan);
 
1390	u32 residue;
 
 
 
1391
1392	if (sdmac->flags & IMX_DMA_SG_LOOP)
1393		residue = (sdmac->num_bd - sdmac->buf_ptail) *
1394			   sdmac->period_len - sdmac->chn_real_count;
1395	else
1396		residue = sdmac->chn_count - sdmac->chn_real_count;
1397
1398	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1399			 residue);
1400
1401	return sdmac->status;
1402}
1403
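/*
 * Editor's illustrative sketch (not part of the driver): the cyclic residue
 * math used by sdma_tx_status() above, pulled out as a standalone helper.
 * The sample numbers in main() are arbitrary.
 */
#include <stdio.h>

static unsigned int cyclic_residue(unsigned int num_bd, unsigned int buf_ptail,
				   unsigned int period_len,
				   unsigned int chn_real_count)
{
	/* bytes left from the last completed descriptor to the end of the ring */
	return (num_bd - buf_ptail) * period_len - chn_real_count;
}

int main(void)
{
	/* 4 periods of 4096 bytes, last completed BD index 1, 4096 already done */
	printf("residue = %u\n", cyclic_residue(4, 1, 4096, 4096));
	return 0;
}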
1404static void sdma_issue_pending(struct dma_chan *chan)
1405{
1406	struct sdma_channel *sdmac = to_sdma_chan(chan);
1407	struct sdma_engine *sdma = sdmac->sdma;
1408
1409	if (sdmac->status == DMA_IN_PROGRESS)
1410		sdma_enable_channel(sdma, sdmac->channel);
 
 
1411}
1412
1413#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
1414#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
1415#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41
1416#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	42
1417
1418static void sdma_add_scripts(struct sdma_engine *sdma,
1419		const struct sdma_script_start_addrs *addr)
1420{
1421	s32 *addr_arr = (u32 *)addr;
1422	s32 *saddr_arr = (u32 *)sdma->script_addrs;
1423	int i;
1424
 1425	/* use the default firmware in ROM if external firmware is missing */
1426	if (!sdma->script_number)
1427		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1428
1429	for (i = 0; i < sdma->script_number; i++)
1430		if (addr_arr[i] > 0)
1431			saddr_arr[i] = addr_arr[i];
1432}
1433
1434static void sdma_load_firmware(const struct firmware *fw, void *context)
1435{
1436	struct sdma_engine *sdma = context;
1437	const struct sdma_firmware_header *header;
1438	const struct sdma_script_start_addrs *addr;
1439	unsigned short *ram_code;
1440
1441	if (!fw) {
1442		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1443		/* In this case we just use the ROM firmware. */
1444		return;
1445	}
1446
1447	if (fw->size < sizeof(*header))
1448		goto err_firmware;
1449
1450	header = (struct sdma_firmware_header *)fw->data;
1451
1452	if (header->magic != SDMA_FIRMWARE_MAGIC)
1453		goto err_firmware;
1454	if (header->ram_code_start + header->ram_code_size > fw->size)
1455		goto err_firmware;
1456	switch (header->version_major) {
1457	case 1:
1458		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1459		break;
1460	case 2:
1461		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
1462		break;
1463	case 3:
1464		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
1465		break;
1466	case 4:
1467		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
1468		break;
1469	default:
1470		dev_err(sdma->dev, "unknown firmware version\n");
1471		goto err_firmware;
1472	}
1473
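	/*
	 * Both the script address table and the RAM code image live inside
	 * the firmware blob, at offsets relative to the header.
	 */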
1474	addr = (void *)header + header->script_addrs_start;
1475	ram_code = (void *)header + header->ram_code_start;
1476
1477	clk_enable(sdma->clk_ipg);
1478	clk_enable(sdma->clk_ahb);
1479	/* download the RAM image for SDMA */
1480	sdma_load_script(sdma, ram_code,
1481			header->ram_code_size,
1482			addr->ram_code_start_addr);
1483	clk_disable(sdma->clk_ipg);
1484	clk_disable(sdma->clk_ahb);
1485
1486	sdma_add_scripts(sdma, addr);
1487
1488	dev_info(sdma->dev, "loaded firmware %d.%d\n",
1489			header->version_major,
1490			header->version_minor);
1491
1492err_firmware:
1493	release_firmware(fw);
1494}
1495
1496#define EVENT_REMAP_CELLS 3
1497
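/*
 * Each entry of the "fsl,sdma-event-remap" property is a <reg shift val>
 * triplet: bit "shift" of GPR register "reg" is set to "val" through the
 * syscon regmap, e.g. (hypothetical values):
 *
 *	fsl,sdma-event-remap = <0 15 1>, <0 14 1>;
 */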
1498static int sdma_event_remap(struct sdma_engine *sdma)
1499{
1500	struct device_node *np = sdma->dev->of_node;
1501	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1502	struct property *event_remap;
1503	struct regmap *gpr;
1504	char propname[] = "fsl,sdma-event-remap";
1505	u32 reg, val, shift, num_map, i;
1506	int ret = 0;
1507
1508	if (!np || !gpr_np)
1509		goto out;
1510
1511	event_remap = of_find_property(np, propname, NULL);
1512	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
1513	if (!num_map) {
1514		dev_dbg(sdma->dev, "no event needs to be remapped\n");
1515		goto out;
1516	} else if (num_map % EVENT_REMAP_CELLS) {
1517		dev_err(sdma->dev, "the property %s length must be a multiple of %d\n",
1518				propname, EVENT_REMAP_CELLS);
1519		ret = -EINVAL;
1520		goto out;
1521	}
1522
1523	gpr = syscon_node_to_regmap(gpr_np);
1524	if (IS_ERR(gpr)) {
1525		dev_err(sdma->dev, "failed to get gpr regmap\n");
1526		ret = PTR_ERR(gpr);
1527		goto out;
1528	}
1529
1530	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
1531		ret = of_property_read_u32_index(np, propname, i, &reg);
1532		if (ret) {
1533			dev_err(sdma->dev, "failed to read property %s index %d\n",
1534					propname, i);
1535			goto out;
1536		}
1537
1538		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
1539		if (ret) {
1540			dev_err(sdma->dev, "failed to read property %s index %d\n",
1541					propname, i + 1);
1542			goto out;
1543		}
1544
1545		ret = of_property_read_u32_index(np, propname, i + 2, &val);
1546		if (ret) {
1547			dev_err(sdma->dev, "failed to read property %s index %d\n",
1548					propname, i + 2);
1549			goto out;
1550		}
1551
1552		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
1553	}
1554
1555out:
1556	if (!IS_ERR(gpr_np))
1557		of_node_put(gpr_np);
1558
1559	return ret;
1560}
1561
1562static int sdma_get_firmware(struct sdma_engine *sdma,
1563		const char *fw_name)
1564{
1565	int ret;
1566
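	/*
	 * Non-blocking request: probe is not stalled waiting for the image;
	 * sdma_load_firmware() runs asynchronously once the firmware is
	 * available (or with fw == NULL if it never turns up).
	 */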
1567	ret = request_firmware_nowait(THIS_MODULE,
1568			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1569			GFP_KERNEL, sdma, sdma_load_firmware);
1570
1571	return ret;
1572}
1573
1574static int sdma_init(struct sdma_engine *sdma)
1575{
1576	int i, ret;
1577	dma_addr_t ccb_phys;
1578
1579	ret = clk_enable(sdma->clk_ipg);
1580	if (ret)
1581		return ret;
1582	ret = clk_enable(sdma->clk_ahb);
1583	if (ret)
1584		goto disable_clk_ipg;
1585
1586	/* Be sure SDMA has not started yet */
1587	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1588
1589	sdma->channel_control = dma_alloc_coherent(sdma->dev,
1590			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1591			sizeof(struct sdma_context_data),
1592			&ccb_phys, GFP_KERNEL);
1593
1594	if (!sdma->channel_control) {
1595		ret = -ENOMEM;
1596		goto err_dma_alloc;
1597	}
1598
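	/*
	 * The channel 0 context buffer shares the coherent allocation above,
	 * placed right behind the MAX_DMA_CHANNELS control blocks.
	 */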
1599	sdma->context = (void *)sdma->channel_control +
1600		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1601	sdma->context_phys = ccb_phys +
1602		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1603
1604	/* Zero-out the CCB structures array just allocated */
1605	memset(sdma->channel_control, 0,
1606			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1607
1608	/* disable event-triggered starts for all channels */
1609	for (i = 0; i < sdma->drvdata->num_events; i++)
1610		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1611
1612	/* All channels have priority 0 */
1613	for (i = 0; i < MAX_DMA_CHANNELS; i++)
1614		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1615
1616	ret = sdma_request_channel(&sdma->channel[0]);
1617	if (ret)
1618		goto err_dma_alloc;
1619
1620	sdma_config_ownership(&sdma->channel[0], false, true, false);
1621
1622	/* Set Command Channel (Channel Zero) */
1623	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1624
1625	/* Set bits of CONFIG register but with static context switching */
1626	/* FIXME: Check whether to set ACR bit depending on clock ratios */
1627	writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1628
1629	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1630
1631	/* Set the priority of channel 0 (the command channel) */
1632	sdma_set_channel_priority(&sdma->channel[0], 7);
1633
1634	clk_disable(sdma->clk_ipg);
1635	clk_disable(sdma->clk_ahb);
1636
1637	return 0;
1638
1639err_dma_alloc:
1640	clk_disable(sdma->clk_ahb);
1641disable_clk_ipg:
1642	clk_disable(sdma->clk_ipg);
1643	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1644	return ret;
1645}
1646
1647static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1648{
1649	struct sdma_channel *sdmac = to_sdma_chan(chan);
1650	struct imx_dma_data *data = fn_param;
1651
1652	if (!imx_dma_is_general_purpose(chan))
1653		return false;
1654
1655	sdmac->data = *data;
1656	chan->private = &sdmac->data;
1657
1658	return true;
1659}
1660
1661static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1662				   struct of_dma *ofdma)
1663{
1664	struct sdma_engine *sdma = ofdma->of_dma_data;
1665	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
1666	struct imx_dma_data data;
1667
1668	if (dma_spec->args_count != 3)
1669		return NULL;
1670
1671	data.dma_request = dma_spec->args[0];
1672	data.peripheral_type = dma_spec->args[1];
1673	data.priority = dma_spec->args[2];
1674	/*
1675	 * Initialize dma_request2 to zero; it is not encoded in the device
1676	 * tree. For P2P transfers, dma_request2 is set up through
1677	 * dma_request_channel(): chan->private then points at this
1678	 * imx_dma_data, and device_alloc_chan_resources() takes
1679	 * sdmac->event_id1 from imx_dma_data.dma_request2.
1680	 */
1681	data.dma_request2 = 0;
1682
1683	return dma_request_channel(mask, sdma_filter_fn, &data);
1684}
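
/*
 * Illustrative only (hypothetical values): a peripheral node requests a
 * channel with three cells matching the parsing above - DMA request line,
 * peripheral type and transfer priority:
 *
 *	dmas = <&sdma 25 4 0>;
 *	dma-names = "rx";
 */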
1685
1686static int sdma_probe(struct platform_device *pdev)
1687{
1688	const struct of_device_id *of_id =
1689			of_match_device(sdma_dt_ids, &pdev->dev);
1690	struct device_node *np = pdev->dev.of_node;
1691	struct device_node *spba_bus;
1692	const char *fw_name;
1693	int ret;
1694	int irq;
1695	struct resource *iores;
1696	struct resource spba_res;
1697	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1698	int i;
1699	struct sdma_engine *sdma;
1700	s32 *saddr_arr;
1701	const struct sdma_driver_data *drvdata = NULL;
1702
1703	if (of_id)
1704		drvdata = of_id->data;
1705	else if (pdev->id_entry)
1706		drvdata = (void *)pdev->id_entry->driver_data;
1707
1708	if (!drvdata) {
1709		dev_err(&pdev->dev, "unable to find driver data\n");
1710		return -EINVAL;
1711	}
1712
1713	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1714	if (ret)
1715		return ret;
1716
1717	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
1718	if (!sdma)
1719		return -ENOMEM;
1720
1721	spin_lock_init(&sdma->channel_0_lock);
1722
1723	sdma->dev = &pdev->dev;
1724	sdma->drvdata = drvdata;
1725
1726	irq = platform_get_irq(pdev, 0);
1727	if (irq < 0)
1728		return irq;
1729
1730	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1731	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
1732	if (IS_ERR(sdma->regs))
1733		return PTR_ERR(sdma->regs);
1734
1735	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1736	if (IS_ERR(sdma->clk_ipg))
1737		return PTR_ERR(sdma->clk_ipg);
1738
1739	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1740	if (IS_ERR(sdma->clk_ahb))
1741		return PTR_ERR(sdma->clk_ahb);
1742
1743	clk_prepare(sdma->clk_ipg);
1744	clk_prepare(sdma->clk_ahb);
1745
1746	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
1747			       sdma);
1748	if (ret)
1749		return ret;
1750
1751	sdma->irq = irq;
1752
1753	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1754	if (!sdma->script_addrs)
1755		return -ENOMEM;
1756
1757	/* initially no scripts available */
1758	saddr_arr = (s32 *)sdma->script_addrs;
1759	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1760		saddr_arr[i] = -EINVAL;
1761
1762	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1763	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1764
1765	INIT_LIST_HEAD(&sdma->dma_device.channels);
1766	/* Initialize channel parameters */
1767	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1768		struct sdma_channel *sdmac = &sdma->channel[i];
1769
1770		sdmac->sdma = sdma;
1771		spin_lock_init(&sdmac->lock);
1772
1773		sdmac->chan.device = &sdma->dma_device;
1774		dma_cookie_init(&sdmac->chan);
1775		sdmac->channel = i;
1776
1777		tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
1778			     (unsigned long) sdmac);
1779		/*
1780		 * Add the channel to the DMAC list. Do not add channel 0 though
1781		 * because we need it internally in the SDMA driver. This also means
1782		 * that channel 0 in dmaengine counting matches sdma channel 1.
1783		 */
1784		if (i)
1785			list_add_tail(&sdmac->chan.device_node,
1786					&sdma->dma_device.channels);
1787	}
1788
1789	ret = sdma_init(sdma);
1790	if (ret)
1791		goto err_init;
1792
1793	ret = sdma_event_remap(sdma);
1794	if (ret)
1795		goto err_init;
1796
1797	if (sdma->drvdata->script_addrs)
1798		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
1799	if (pdata && pdata->script_addrs)
1800		sdma_add_scripts(sdma, pdata->script_addrs);
1801
1802	if (pdata) {
1803		ret = sdma_get_firmware(sdma, pdata->fw_name);
1804		if (ret)
1805			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
1806	} else {
1807		/*
1808		 * Because the device tree does not encode ROM script addresses,
1809		 * the RAM script in the firmware is mandatory for a device tree
1810		 * probe; without it the probe fails.
1811		 */
1812		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
1813					      &fw_name);
1814		if (ret)
1815			dev_warn(&pdev->dev, "failed to get firmware name\n");
1816		else {
1817			ret = sdma_get_firmware(sdma, fw_name);
1818			if (ret)
1819				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
1820		}
1821	}
1822
1823	sdma->dma_device.dev = &pdev->dev;
1824
1825	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
1826	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
1827	sdma->dma_device.device_tx_status = sdma_tx_status;
1828	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1829	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1830	sdma->dma_device.device_config = sdma_config;
1831	sdma->dma_device.device_terminate_all = sdma_disable_channel;
1832	sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1833	sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1834	sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1835	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1836	sdma->dma_device.device_issue_pending = sdma_issue_pending;
1837	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
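	/*
	 * The count field of an SDMA buffer descriptor is 16 bits wide, so a
	 * single segment is limited to 65535 bytes.
	 */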
1838	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1839
1840	platform_set_drvdata(pdev, sdma);
1841
1842	ret = dma_async_device_register(&sdma->dma_device);
1843	if (ret) {
1844		dev_err(&pdev->dev, "unable to register\n");
1845		goto err_init;
1846	}
1847
1848	if (np) {
1849		ret = of_dma_controller_register(np, sdma_xlate, sdma);
1850		if (ret) {
1851			dev_err(&pdev->dev, "failed to register controller\n");
1852			goto err_register;
1853		}
1854
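		/*
		 * Record the SPBA (shared peripheral bus) address range; it
		 * is used to tell whether the peripherals of a device-to-
		 * device transfer sit on that bus.
		 */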
1855		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
1856		ret = of_address_to_resource(spba_bus, 0, &spba_res);
1857		if (!ret) {
1858			sdma->spba_start_addr = spba_res.start;
1859			sdma->spba_end_addr = spba_res.end;
1860		}
1861		of_node_put(spba_bus);
1862	}
1863
1864	return 0;
1865
1866err_register:
1867	dma_async_device_unregister(&sdma->dma_device);
1868err_init:
1869	kfree(sdma->script_addrs);
1870	return ret;
1871}
1872
1873static int sdma_remove(struct platform_device *pdev)
1874{
1875	struct sdma_engine *sdma = platform_get_drvdata(pdev);
1876	int i;
1877
1878	devm_free_irq(&pdev->dev, sdma->irq, sdma);
1879	dma_async_device_unregister(&sdma->dma_device);
1880	kfree(sdma->script_addrs);
1881	/* Kill the tasklet */
1882	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1883		struct sdma_channel *sdmac = &sdma->channel[i];
1884
1885		tasklet_kill(&sdmac->tasklet);
1886	}
1887
1888	platform_set_drvdata(pdev, NULL);
1889	return 0;
1890}
1891
1892static struct platform_driver sdma_driver = {
1893	.driver		= {
1894		.name	= "imx-sdma",
1895		.of_match_table = sdma_dt_ids,
1896	},
1897	.id_table	= sdma_devtypes,
1898	.remove		= sdma_remove,
1899	.probe		= sdma_probe,
1900};
1901
1902module_platform_driver(sdma_driver);
1903
1904MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1905MODULE_DESCRIPTION("i.MX SDMA driver");
1906MODULE_LICENSE("GPL");