   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Ericsson AB 2007-2008
   4 * Copyright (C) ST-Ericsson SA 2008-2010
   5 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
   6 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
   7 */
   8
   9#include <linux/dma-mapping.h>
  10#include <linux/kernel.h>
  11#include <linux/slab.h>
  12#include <linux/export.h>
  13#include <linux/dmaengine.h>
  14#include <linux/platform_device.h>
  15#include <linux/clk.h>
  16#include <linux/delay.h>
  17#include <linux/log2.h>
  18#include <linux/pm.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/err.h>
  21#include <linux/of.h>
  22#include <linux/of_dma.h>
  23#include <linux/amba/bus.h>
  24#include <linux/regulator/consumer.h>
  25#include <linux/platform_data/dma-ste-dma40.h>
  26
  27#include "dmaengine.h"
  28#include "ste_dma40_ll.h"
  29
  30#define D40_NAME "dma40"
  31
  32#define D40_PHY_CHAN -1
  33
   34/* For masking out/in the 2-bit channel positions */
  35#define D40_CHAN_POS(chan)  (2 * (chan / 2))
  36#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
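
/*
 * Illustrative example (not part of the driver): the 2-bit status fields
 * of an even/odd channel pair share the same bit position in their
 * respective ACTIVE/ACTIVO registers, e.g. D40_CHAN_POS(4) ==
 * D40_CHAN_POS(5) == 4, and D40_CHAN_POS_MASK(5) == 0x3 << 4 == 0x30.
 * A channel status is then extracted as in __d40_execute_command_phy():
 *
 *	status = (readl(active_reg) & D40_CHAN_POS_MASK(chan)) >>
 *		 D40_CHAN_POS(chan);
 */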
  37
  38/* Maximum iterations taken before giving up suspending a channel */
  39#define D40_SUSPEND_MAX_IT 500
  40
  41/* Milliseconds */
  42#define DMA40_AUTOSUSPEND_DELAY	100
  43
  44/* Hardware requirement on LCLA alignment */
  45#define LCLA_ALIGNMENT 0x40000
  46
  47/* Max number of links per event group */
  48#define D40_LCLA_LINK_PER_EVENT_GRP 128
  49#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
  50
  51/* Max number of logical channels per physical channel */
  52#define D40_MAX_LOG_CHAN_PER_PHY 32
  53
   55/* Attempts made before giving up on getting pages that are aligned */
  55#define MAX_LCLA_ALLOC_ATTEMPTS 256
  56
  57/* Bit markings for allocation map */
  58#define D40_ALLOC_FREE		BIT(31)
  59#define D40_ALLOC_PHY		BIT(30)
  60#define D40_ALLOC_LOG_FREE	0
  61
  62#define D40_MEMCPY_MAX_CHANS	8
  63
  64/* Reserved event lines for memcpy only. */
  65#define DB8500_DMA_MEMCPY_EV_0	51
  66#define DB8500_DMA_MEMCPY_EV_1	56
  67#define DB8500_DMA_MEMCPY_EV_2	57
  68#define DB8500_DMA_MEMCPY_EV_3	58
  69#define DB8500_DMA_MEMCPY_EV_4	59
  70#define DB8500_DMA_MEMCPY_EV_5	60
  71
  72static int dma40_memcpy_channels[] = {
  73	DB8500_DMA_MEMCPY_EV_0,
  74	DB8500_DMA_MEMCPY_EV_1,
  75	DB8500_DMA_MEMCPY_EV_2,
  76	DB8500_DMA_MEMCPY_EV_3,
  77	DB8500_DMA_MEMCPY_EV_4,
  78	DB8500_DMA_MEMCPY_EV_5,
  79};
  80
   81/* Default configuration for physical memcpy */
  82static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
  83	.mode = STEDMA40_MODE_PHYSICAL,
  84	.dir = DMA_MEM_TO_MEM,
  85
  86	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
  87	.src_info.psize = STEDMA40_PSIZE_PHY_1,
  88	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
  89
  90	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
  91	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
  92	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
  93};
  94
  95/* Default configuration for logical memcpy */
  96static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
  97	.mode = STEDMA40_MODE_LOGICAL,
  98	.dir = DMA_MEM_TO_MEM,
  99
 100	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 101	.src_info.psize = STEDMA40_PSIZE_LOG_1,
 102	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 103
 104	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 105	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
 106	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 107};
 108
 109/**
  110 * enum d40_command - The different commands and/or statuses.
  111 *
  112 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
  113 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 114 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 115 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 116 */
 117enum d40_command {
 118	D40_DMA_STOP		= 0,
 119	D40_DMA_RUN		= 1,
 120	D40_DMA_SUSPEND_REQ	= 2,
 121	D40_DMA_SUSPENDED	= 3
 122};
 123
 124/*
 125 * enum d40_events - The different Event Enables for the event lines.
 126 *
 127 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 128 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
  129 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 130 * @D40_ROUND_EVENTLINE: Status check for event line.
 131 */
 132
 133enum d40_events {
 134	D40_DEACTIVATE_EVENTLINE	= 0,
 135	D40_ACTIVATE_EVENTLINE		= 1,
 136	D40_SUSPEND_REQ_EVENTLINE	= 2,
 137	D40_ROUND_EVENTLINE		= 3
 138};
 139
 140/*
  141 * These are the registers that have to be saved and later restored
 142 * when the DMA hw is powered off.
 143 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 144 */
 145static __maybe_unused u32 d40_backup_regs[] = {
 146	D40_DREG_LCPA,
 147	D40_DREG_LCLA,
 148	D40_DREG_PRMSE,
 149	D40_DREG_PRMSO,
 150	D40_DREG_PRMOE,
 151	D40_DREG_PRMOO,
 152};
 153
 154#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
 155
 156/*
  157 * Since the 9540 and the 8540 have the same HW revision,
  158 * use v4a for 9540 or earlier
 159 * use v4b for 8540 or later
 160 * HW revision:
 161 * DB8500ed has revision 0
 162 * DB8500v1 has revision 2
 163 * DB8500v2 has revision 3
 164 * AP9540v1 has revision 4
 165 * DB8540v1 has revision 4
 166 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 167 */
 168static u32 d40_backup_regs_v4a[] = {
 169	D40_DREG_PSEG1,
 170	D40_DREG_PSEG2,
 171	D40_DREG_PSEG3,
 172	D40_DREG_PSEG4,
 173	D40_DREG_PCEG1,
 174	D40_DREG_PCEG2,
 175	D40_DREG_PCEG3,
 176	D40_DREG_PCEG4,
 177	D40_DREG_RSEG1,
 178	D40_DREG_RSEG2,
 179	D40_DREG_RSEG3,
 180	D40_DREG_RSEG4,
 181	D40_DREG_RCEG1,
 182	D40_DREG_RCEG2,
 183	D40_DREG_RCEG3,
 184	D40_DREG_RCEG4,
 185};
 186
 187#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
 188
 189static u32 d40_backup_regs_v4b[] = {
 190	D40_DREG_CPSEG1,
 191	D40_DREG_CPSEG2,
 192	D40_DREG_CPSEG3,
 193	D40_DREG_CPSEG4,
 194	D40_DREG_CPSEG5,
 195	D40_DREG_CPCEG1,
 196	D40_DREG_CPCEG2,
 197	D40_DREG_CPCEG3,
 198	D40_DREG_CPCEG4,
 199	D40_DREG_CPCEG5,
 200	D40_DREG_CRSEG1,
 201	D40_DREG_CRSEG2,
 202	D40_DREG_CRSEG3,
 203	D40_DREG_CRSEG4,
 204	D40_DREG_CRSEG5,
 205	D40_DREG_CRCEG1,
 206	D40_DREG_CRCEG2,
 207	D40_DREG_CRCEG3,
 208	D40_DREG_CRCEG4,
 209	D40_DREG_CRCEG5,
 210};
 211
 212#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 213
 214static __maybe_unused u32 d40_backup_regs_chan[] = {
 215	D40_CHAN_REG_SSCFG,
 216	D40_CHAN_REG_SSELT,
 217	D40_CHAN_REG_SSPTR,
 218	D40_CHAN_REG_SSLNK,
 219	D40_CHAN_REG_SDCFG,
 220	D40_CHAN_REG_SDELT,
 221	D40_CHAN_REG_SDPTR,
 222	D40_CHAN_REG_SDLNK,
 223};
 224
 225#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
 226			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
 227
 228/**
 229 * struct d40_interrupt_lookup - lookup table for interrupt handler
 230 *
 231 * @src: Interrupt mask register.
 232 * @clr: Interrupt clear register.
 233 * @is_error: true if this is an error interrupt.
 234 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 235 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 236 */
 237struct d40_interrupt_lookup {
 238	u32 src;
 239	u32 clr;
 240	bool is_error;
 241	int offset;
 242};
 243
 244
 245static struct d40_interrupt_lookup il_v4a[] = {
 246	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
 247	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
 248	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
 249	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
 250	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
 251	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
 252	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
 253	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
 254	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
 255	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
 256};
 257
 258static struct d40_interrupt_lookup il_v4b[] = {
 259	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,  0},
 260	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
 261	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
 262	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
 263	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
 264	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,   0},
 265	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,  32},
 266	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,  64},
 267	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,  96},
 268	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
 269	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
 270	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
 271};
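
/*
 * Sketch of how the lookup tables above are consumed (see
 * d40_handle_interrupt() below): bit n of the status register in @src
 * maps to logical channel @offset + n. For example, a terminal-count
 * interrupt flagged in bit 3 of D40_DREG_CLCTIS2 belongs to
 * lookup_log_chans[32 + 3]. Rows whose @offset is D40_PHY_CHAN index
 * lookup_phy_chans[] instead.
 */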
 272
 273/**
 274 * struct d40_reg_val - simple lookup struct
 275 *
 276 * @reg: The register.
 277 * @val: The value that belongs to the register in reg.
 278 */
 279struct d40_reg_val {
 280	unsigned int reg;
 281	unsigned int val;
 282};
 283
 284static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
 285	/* Clock every part of the DMA block from start */
 286	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
 287
 288	/* Interrupts on all logical channels */
 289	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
 290	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
 291	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
 292	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
 293	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
 294	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
 295	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
 296	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
 297	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
 298	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
 299	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
 300	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
 301};
 302static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
 303	/* Clock every part of the DMA block from start */
 304	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
 305
 306	/* Interrupts on all logical channels */
 307	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
 308	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
 309	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
 310	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
 311	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
 312	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
 313	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
 314	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
 315	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
 316	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
 317	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
 318	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
 319	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
 320	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
 321	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
 322};
 323
 324/**
 325 * struct d40_lli_pool - Structure for keeping LLIs in memory
 326 *
 327 * @base: Pointer to memory area when the pre_alloc_lli's are not large
  328 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 329 * pre_alloc_lli is used.
 330 * @dma_addr: DMA address, if mapped
 331 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 332 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 333 * one buffer to one buffer.
 334 */
 335struct d40_lli_pool {
 336	void	*base;
 337	int	 size;
 338	dma_addr_t	dma_addr;
 339	/* Space for dst and src, plus an extra for padding */
 340	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
 341};
 342
 343/**
 344 * struct d40_desc - A descriptor is one DMA job.
 345 *
  346 * @lli_phy: LLI settings for physical channel. Both src and dst
  347 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 348 * lli_len equals one.
 349 * @lli_log: Same as above but for logical channels.
 350 * @lli_pool: The pool with two entries pre-allocated.
 351 * @lli_len: Number of llis of current descriptor.
 352 * @lli_current: Number of transferred llis.
 353 * @lcla_alloc: Number of LCLA entries allocated.
  354 * @txd: DMA engine struct. Used, among other things, for communication
  355 * during a transfer.
 356 * @node: List entry.
 357 * @is_in_client_list: true if the client owns this descriptor.
 358 * @cyclic: true if this is a cyclic job
 359 *
 360 * This descriptor is used for both logical and physical transfers.
 361 */
 362struct d40_desc {
 363	/* LLI physical */
 364	struct d40_phy_lli_bidir	 lli_phy;
 365	/* LLI logical */
 366	struct d40_log_lli_bidir	 lli_log;
 367
 368	struct d40_lli_pool		 lli_pool;
 369	int				 lli_len;
 370	int				 lli_current;
 371	int				 lcla_alloc;
 372
 373	struct dma_async_tx_descriptor	 txd;
 374	struct list_head		 node;
 375
 376	bool				 is_in_client_list;
 377	bool				 cyclic;
 378};
 379
 380/**
 381 * struct d40_lcla_pool - LCLA pool settings and data.
 382 *
  383 * @base: The virtual address of LCLA. Aligned on a 2^18 (256 KiB) boundary.
 384 * @dma_addr: DMA address, if mapped
  385 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
  386 * This pointer is only there for clean-up on error.
  387 * @pages: The number of pages needed for all physical channels.
  388 * Only used later for clean-up on error.
  389 * @lock: Lock to protect the content in this struct.
  390 * @alloc_map: Big map of which LCLA entry is owned by which job.
 391 */
 392struct d40_lcla_pool {
 393	void		*base;
 394	dma_addr_t	dma_addr;
 395	void		*base_unaligned;
 396	int		 pages;
 397	spinlock_t	 lock;
 398	struct d40_desc	**alloc_map;
 399};
 400
 401/**
 402 * struct d40_phy_res - struct for handling eventlines mapped to physical
 403 * channels.
 404 *
  405 * @lock: A lock protecting this entity.
  406 * @reserved: True if used by the secure world or otherwise reserved.
  407 * @num: The physical channel number of this entity.
  408 * @allocated_src: Bit mapped to show which src event lines are mapped to
  409 * this physical channel. Can also be free or physically allocated.
  410 * @allocated_dst: Same as for src but for dst.
  411 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
  412 * the event line number.
  413 * @use_soft_lli: Marks whether the channel's linked lists are managed by SW.
 414 */
 415struct d40_phy_res {
 416	spinlock_t lock;
 417	bool	   reserved;
 418	int	   num;
 419	u32	   allocated_src;
 420	u32	   allocated_dst;
 421	bool	   use_soft_lli;
 422};
 423
 424struct d40_base;
 425
 426/**
 427 * struct d40_chan - Struct that describes a channel.
 428 *
 429 * @lock: A spinlock to protect this struct.
  430 * @log_num: The logical number, if any, of this channel.
  431 * @pending_tx: The number of pending transfers. Used between interrupt handler
  432 * and tasklet.
  433 * @busy: Set to true when transfer is ongoing on this channel.
  434 * @phy_chan: Pointer to physical channel which this instance runs on. If this
  435 * pointer is NULL, then the channel is not allocated.
  436 * @chan: DMA engine handle.
  437 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
  438 * transfer and call client callback.
  439 * @client: Client owned descriptor list.
 440 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 441 * @active: Active descriptor.
 442 * @done: Completed jobs
 443 * @queue: Queued jobs.
 444 * @prepare_queue: Prepared jobs.
 445 * @dma_cfg: The client configuration of this dma channel.
 446 * @slave_config: DMA slave configuration.
 447 * @configured: whether the dma_cfg configuration is valid
 448 * @base: Pointer to the device instance struct.
 449 * @src_def_cfg: Default cfg register setting for src.
 450 * @dst_def_cfg: Default cfg register setting for dst.
 451 * @log_def: Default logical channel settings.
 452 * @lcpa: Pointer to dst and src lcpa settings.
 453 * @runtime_addr: runtime configured address.
 454 * @runtime_direction: runtime configured direction.
 455 *
 456 * This struct can either "be" a logical or a physical channel.
 457 */
 458struct d40_chan {
 459	spinlock_t			 lock;
 460	int				 log_num;
 461	int				 pending_tx;
 462	bool				 busy;
 463	struct d40_phy_res		*phy_chan;
 464	struct dma_chan			 chan;
 465	struct tasklet_struct		 tasklet;
 466	struct list_head		 client;
 467	struct list_head		 pending_queue;
 468	struct list_head		 active;
 469	struct list_head		 done;
 470	struct list_head		 queue;
 471	struct list_head		 prepare_queue;
 472	struct stedma40_chan_cfg	 dma_cfg;
 473	struct dma_slave_config		 slave_config;
 474	bool				 configured;
 475	struct d40_base			*base;
 476	/* Default register configurations */
 477	u32				 src_def_cfg;
 478	u32				 dst_def_cfg;
 479	struct d40_def_lcsp		 log_def;
 480	struct d40_log_lli_full		*lcpa;
 481	/* Runtime reconfiguration */
 482	dma_addr_t			runtime_addr;
 483	enum dma_transfer_direction	runtime_direction;
 484};
 485
 486/**
 487 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 488 * controller
 489 *
 490 * @backup: the pointer to the registers address array for backup
 491 * @backup_size: the size of the registers address array for backup
 492 * @realtime_en: the realtime enable register
 493 * @realtime_clear: the realtime clear register
 494 * @high_prio_en: the high priority enable register
 495 * @high_prio_clear: the high priority clear register
 496 * @interrupt_en: the interrupt enable register
 497 * @interrupt_clear: the interrupt clear register
 498 * @il: the pointer to struct d40_interrupt_lookup
 499 * @il_size: the size of d40_interrupt_lookup array
 500 * @init_reg: the pointer to the struct d40_reg_val
 501 * @init_reg_size: the size of d40_reg_val array
 502 */
 503struct d40_gen_dmac {
 504	u32				*backup;
 505	u32				 backup_size;
 506	u32				 realtime_en;
 507	u32				 realtime_clear;
 508	u32				 high_prio_en;
 509	u32				 high_prio_clear;
 510	u32				 interrupt_en;
 511	u32				 interrupt_clear;
 512	struct d40_interrupt_lookup	*il;
 513	u32				 il_size;
 514	struct d40_reg_val		*init_reg;
 515	u32				 init_reg_size;
 516};
 517
 518/**
 519 * struct d40_base - The big global struct, one for each probe'd instance.
 520 *
  521 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 522 * @execmd_lock: Lock for execute command usage since several channels share
 523 * the same physical register.
 524 * @dev: The device structure.
 525 * @virtbase: The virtual base address of the DMA's register.
 526 * @rev: silicon revision detected.
 527 * @clk: Pointer to the DMA clock structure.
 528 * @phy_start: Physical memory start of the DMA registers.
 529 * @phy_size: Size of the DMA register map.
 530 * @irq: The IRQ number.
 531 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 532 * transfers).
 533 * @num_phy_chans: The number of physical channels. Read from HW. This
 534 * is the number of available channels for this driver, not counting "Secure
 535 * mode" allocated physical channels.
 536 * @num_log_chans: The number of logical channels. Calculated from
 537 * num_phy_chans.
 538 * @dma_parms: DMA parameters for the channel
 539 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
  540 * @dma_slave: dma_device channels that can only do slave transfers.
  541 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 542 * @phy_chans: Room for all possible physical channels in system.
 543 * @log_chans: Room for all possible logical channels in system.
 544 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 545 * to log_chans entries.
 546 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 547 * to phy_chans entries.
 548 * @plat_data: Pointer to provided platform_data which is the driver
 549 * configuration.
 550 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 551 * @phy_res: Vector containing all physical channels.
 552 * @lcla_pool: lcla pool settings and data.
 553 * @lcpa_base: The virtual mapped address of LCPA.
 554 * @phy_lcpa: The physical address of the LCPA.
 555 * @lcpa_size: The size of the LCPA area.
 556 * @desc_slab: cache for descriptors.
 557 * @reg_val_backup: Here the values of some hardware registers are stored
 558 * before the DMA is powered off. They are restored when the power is back on.
  559 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 560 * later
 561 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 562 * @regs_interrupt: Scratch space for registers during interrupt.
 563 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
  564 * @gen_dmac: the struct for generic register values to represent u8500/8540
 565 * DMA controller
 566 */
 567struct d40_base {
 568	spinlock_t			 interrupt_lock;
 569	spinlock_t			 execmd_lock;
 570	struct device			 *dev;
 571	void __iomem			 *virtbase;
 572	u8				  rev:4;
 573	struct clk			 *clk;
 574	phys_addr_t			  phy_start;
 575	resource_size_t			  phy_size;
 576	int				  irq;
 577	int				  num_memcpy_chans;
 578	int				  num_phy_chans;
 579	int				  num_log_chans;
 580	struct device_dma_parameters	  dma_parms;
 581	struct dma_device		  dma_both;
 582	struct dma_device		  dma_slave;
 583	struct dma_device		  dma_memcpy;
 584	struct d40_chan			 *phy_chans;
 585	struct d40_chan			 *log_chans;
 586	struct d40_chan			**lookup_log_chans;
 587	struct d40_chan			**lookup_phy_chans;
 588	struct stedma40_platform_data	 *plat_data;
 589	struct regulator		 *lcpa_regulator;
 590	/* Physical half channels */
 591	struct d40_phy_res		 *phy_res;
 592	struct d40_lcla_pool		  lcla_pool;
 593	void				 *lcpa_base;
 594	dma_addr_t			  phy_lcpa;
 595	resource_size_t			  lcpa_size;
 596	struct kmem_cache		 *desc_slab;
 597	u32				  reg_val_backup[BACKUP_REGS_SZ];
 598	u32				  reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
 599	u32				 *reg_val_backup_chan;
 600	u32				 *regs_interrupt;
 601	u16				  gcc_pwr_off_mask;
 602	struct d40_gen_dmac		  gen_dmac;
 603};
 604
 605static struct device *chan2dev(struct d40_chan *d40c)
 606{
 607	return &d40c->chan.dev->device;
 608}
 609
 610static bool chan_is_physical(struct d40_chan *chan)
 611{
 612	return chan->log_num == D40_PHY_CHAN;
 613}
 614
 615static bool chan_is_logical(struct d40_chan *chan)
 616{
 617	return !chan_is_physical(chan);
 618}
 619
 620static void __iomem *chan_base(struct d40_chan *chan)
 621{
 622	return chan->base->virtbase + D40_DREG_PCBASE +
 623	       chan->phy_chan->num * D40_DREG_PCDELTA;
 624}
 625
 626#define d40_err(dev, format, arg...)		\
 627	dev_err(dev, "[%s] " format, __func__, ## arg)
 628
 629#define chan_err(d40c, format, arg...)		\
 630	d40_err(chan2dev(d40c), format, ## arg)
 631
 632static int d40_set_runtime_config_write(struct dma_chan *chan,
 633				  struct dma_slave_config *config,
 634				  enum dma_transfer_direction direction);
 635
 636static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
 637			      int lli_len)
 638{
 639	bool is_log = chan_is_logical(d40c);
 640	u32 align;
 641	void *base;
 642
 643	if (is_log)
 644		align = sizeof(struct d40_log_lli);
 645	else
 646		align = sizeof(struct d40_phy_lli);
 647
 648	if (lli_len == 1) {
 649		base = d40d->lli_pool.pre_alloc_lli;
 650		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
 651		d40d->lli_pool.base = NULL;
 652	} else {
 653		d40d->lli_pool.size = lli_len * 2 * align;
 654
 655		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
 656		d40d->lli_pool.base = base;
 657
 658		if (d40d->lli_pool.base == NULL)
 659			return -ENOMEM;
 660	}
 661
 662	if (is_log) {
 663		d40d->lli_log.src = PTR_ALIGN(base, align);
 664		d40d->lli_log.dst = d40d->lli_log.src + lli_len;
 665
 666		d40d->lli_pool.dma_addr = 0;
 667	} else {
 668		d40d->lli_phy.src = PTR_ALIGN(base, align);
 669		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
 670
 671		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
 672							 d40d->lli_phy.src,
 673							 d40d->lli_pool.size,
 674							 DMA_TO_DEVICE);
 675
 676		if (dma_mapping_error(d40c->base->dev,
 677				      d40d->lli_pool.dma_addr)) {
 678			kfree(d40d->lli_pool.base);
 679			d40d->lli_pool.base = NULL;
 680			d40d->lli_pool.dma_addr = 0;
 681			return -ENOMEM;
 682		}
 683	}
 684
 685	return 0;
 686}
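
/*
 * Illustrative example (values assumed, not taken from a real transfer):
 * for a physical channel with lli_len == 4, the pool is sized as
 * 4 * 2 * sizeof(struct d40_phy_lli) plus one extra entry of alignment
 * slack; lli_phy.src then points at the aligned base and lli_phy.dst at
 * lli_phy.src + 4, i.e. the second half of the area. A one-link job
 * (lli_len == 1) instead reuses the embedded pre_alloc_lli buffer and
 * needs no kmalloc() at all.
 */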
 687
 688static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
 689{
 690	if (d40d->lli_pool.dma_addr)
 691		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
 692				 d40d->lli_pool.size, DMA_TO_DEVICE);
 693
 694	kfree(d40d->lli_pool.base);
 695	d40d->lli_pool.base = NULL;
 696	d40d->lli_pool.size = 0;
 697	d40d->lli_log.src = NULL;
 698	d40d->lli_log.dst = NULL;
 699	d40d->lli_phy.src = NULL;
 700	d40d->lli_phy.dst = NULL;
 701}
 702
 703static int d40_lcla_alloc_one(struct d40_chan *d40c,
 704			      struct d40_desc *d40d)
 705{
 706	unsigned long flags;
 707	int i;
 708	int ret = -EINVAL;
 709
 710	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
 711
  712	/*
  713	 * Allocate both src and dst at the same time; the loop therefore
  714	 * starts at 1, since 0 can't be used: zero is used as the end marker.
  715	 */
 716	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
 717		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
 718
 719		if (!d40c->base->lcla_pool.alloc_map[idx]) {
 720			d40c->base->lcla_pool.alloc_map[idx] = d40d;
 721			d40d->lcla_alloc++;
 722			ret = i;
 723			break;
 724		}
 725	}
 726
 727	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
 728
 729	return ret;
 730}
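
/*
 * Worked example (illustrative): each physical channel owns
 * D40_LCLA_LINK_PER_EVENT_GRP (128) alloc_map slots, of which only the
 * first half is handed out, since every allocation covers a src/dst
 * pair. If physical channel 3 finds its first free slot at i == 1, the
 * map index is 3 * 128 + 1 == 385 and the function returns 1.
 */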
 731
 732static int d40_lcla_free_all(struct d40_chan *d40c,
 733			     struct d40_desc *d40d)
 734{
 735	unsigned long flags;
 736	int i;
 737	int ret = -EINVAL;
 738
 739	if (chan_is_physical(d40c))
 740		return 0;
 741
 742	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
 743
 744	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
 745		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
 746
 747		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
 748			d40c->base->lcla_pool.alloc_map[idx] = NULL;
 749			d40d->lcla_alloc--;
 750			if (d40d->lcla_alloc == 0) {
 751				ret = 0;
 752				break;
 753			}
 754		}
 755	}
 756
 757	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
 758
 759	return ret;
 760
 761}
 762
 763static void d40_desc_remove(struct d40_desc *d40d)
 764{
 765	list_del(&d40d->node);
 766}
 767
 768static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 769{
 770	struct d40_desc *desc = NULL;
 771
 772	if (!list_empty(&d40c->client)) {
 773		struct d40_desc *d;
 774		struct d40_desc *_d;
 775
 776		list_for_each_entry_safe(d, _d, &d40c->client, node) {
 777			if (async_tx_test_ack(&d->txd)) {
 778				d40_desc_remove(d);
 779				desc = d;
 780				memset(desc, 0, sizeof(*desc));
 781				break;
 782			}
 783		}
 784	}
 785
 786	if (!desc)
 787		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
 788
 789	if (desc)
 790		INIT_LIST_HEAD(&desc->node);
 791
 792	return desc;
 793}
 794
 795static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
 796{
 797
 798	d40_pool_lli_free(d40c, d40d);
 799	d40_lcla_free_all(d40c, d40d);
 800	kmem_cache_free(d40c->base->desc_slab, d40d);
 801}
 802
 803static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
 804{
 805	list_add_tail(&desc->node, &d40c->active);
 806}
 807
 808static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
 809{
 810	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
 811	struct d40_phy_lli *lli_src = desc->lli_phy.src;
 812	void __iomem *base = chan_base(chan);
 813
 814	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
 815	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
 816	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
 817	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
 818
 819	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
 820	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
 821	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
 822	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
 823}
 824
 825static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
 826{
 827	list_add_tail(&desc->node, &d40c->done);
 828}
 829
 830static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 831{
 832	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
 833	struct d40_log_lli_bidir *lli = &desc->lli_log;
 834	int lli_current = desc->lli_current;
 835	int lli_len = desc->lli_len;
 836	bool cyclic = desc->cyclic;
 837	int curr_lcla = -EINVAL;
 838	int first_lcla = 0;
 839	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
 840	bool linkback;
 841
 842	/*
  843	 * We may have partially running cyclic transfers, in case we didn't get
 844	 * enough LCLA entries.
 845	 */
 846	linkback = cyclic && lli_current == 0;
 847
 848	/*
 849	 * For linkback, we need one LCLA even with only one link, because we
 850	 * can't link back to the one in LCPA space
 851	 */
 852	if (linkback || (lli_len - lli_current > 1)) {
 853		/*
  854		 * If the channel is expected to use only soft_lli, don't
  855		 * allocate an lcla. This is to avoid a HW issue that exists
  856		 * in some controllers during peripheral-to-memory transfers
  857		 * that use linked lists.
 858		 */
 859		if (!(chan->phy_chan->use_soft_lli &&
 860			chan->dma_cfg.dir == DMA_DEV_TO_MEM))
 861			curr_lcla = d40_lcla_alloc_one(chan, desc);
 862
 863		first_lcla = curr_lcla;
 864	}
 865
 866	/*
 867	 * For linkback, we normally load the LCPA in the loop since we need to
 868	 * link it to the second LCLA and not the first.  However, if we
 869	 * couldn't even get a first LCLA, then we have to run in LCPA and
 870	 * reload manually.
 871	 */
 872	if (!linkback || curr_lcla == -EINVAL) {
 873		unsigned int flags = 0;
 874
 875		if (curr_lcla == -EINVAL)
 876			flags |= LLI_TERM_INT;
 877
 878		d40_log_lli_lcpa_write(chan->lcpa,
 879				       &lli->dst[lli_current],
 880				       &lli->src[lli_current],
 881				       curr_lcla,
 882				       flags);
 883		lli_current++;
 884	}
 885
 886	if (curr_lcla < 0)
 887		goto set_current;
 888
 889	for (; lli_current < lli_len; lli_current++) {
 890		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
 891					   8 * curr_lcla * 2;
 892		struct d40_log_lli *lcla = pool->base + lcla_offset;
 893		unsigned int flags = 0;
 894		int next_lcla;
 895
 896		if (lli_current + 1 < lli_len)
 897			next_lcla = d40_lcla_alloc_one(chan, desc);
 898		else
 899			next_lcla = linkback ? first_lcla : -EINVAL;
 900
 901		if (cyclic || next_lcla == -EINVAL)
 902			flags |= LLI_TERM_INT;
 903
 904		if (linkback && curr_lcla == first_lcla) {
 905			/* First link goes in both LCPA and LCLA */
 906			d40_log_lli_lcpa_write(chan->lcpa,
 907					       &lli->dst[lli_current],
 908					       &lli->src[lli_current],
 909					       next_lcla, flags);
 910		}
 911
 912		/*
 913		 * One unused LCLA in the cyclic case if the very first
 914		 * next_lcla fails...
 915		 */
 916		d40_log_lli_lcla_write(lcla,
 917				       &lli->dst[lli_current],
 918				       &lli->src[lli_current],
 919				       next_lcla, flags);
 920
 921		/*
 922		 * Cache maintenance is not needed if lcla is
 923		 * mapped in esram
 924		 */
 925		if (!use_esram_lcla) {
 926			dma_sync_single_range_for_device(chan->base->dev,
 927						pool->dma_addr, lcla_offset,
 928						2 * sizeof(struct d40_log_lli),
 929						DMA_TO_DEVICE);
 930		}
 931		curr_lcla = next_lcla;
 932
 933		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
 934			lli_current++;
 935			break;
 936		}
 937	}
 938 set_current:
 939	desc->lli_current = lli_current;
 940}
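
/*
 * Illustrative layout note, derived from the offset arithmetic above and
 * assuming sizeof(struct d40_log_lli) == 8: every LCLA slot holds a
 * dst/src pair (16 bytes) and each physical channel gets 1024 bytes of
 * LCLA space, i.e. 64 slots. For physical channel 2 and curr_lcla == 5
 * the pair therefore lives at:
 *
 *	lcla_offset = 2 * 1024 + 8 * 5 * 2 == 2128
 */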
 941
 942static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
 943{
 944	if (chan_is_physical(d40c)) {
 945		d40_phy_lli_load(d40c, d40d);
 946		d40d->lli_current = d40d->lli_len;
 947	} else
 948		d40_log_lli_to_lcxa(d40c, d40d);
 949}
 950
 951static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
 952{
 953	return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
 954}
 955
 956/* remove desc from current queue and add it to the pending_queue */
 957static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 958{
 959	d40_desc_remove(desc);
 960	desc->is_in_client_list = false;
 961	list_add_tail(&desc->node, &d40c->pending_queue);
 962}
 963
 964static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
 965{
 966	return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
 967					node);
 968}
 969
 970static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
 971{
 972	return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
 973}
 974
 975static struct d40_desc *d40_first_done(struct d40_chan *d40c)
 976{
 977	return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
 978}
 979
 980static int d40_psize_2_burst_size(bool is_log, int psize)
 981{
 982	if (is_log) {
 983		if (psize == STEDMA40_PSIZE_LOG_1)
 984			return 1;
 985	} else {
 986		if (psize == STEDMA40_PSIZE_PHY_1)
 987			return 1;
 988	}
 989
 990	return 2 << psize;
 991}
 992
 993/*
  994 * The DMA only supports transfers of up to
  995 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is given in bytes.
 996 *
 997 * Calculate the total number of dma elements required to send the entire sg list.
 998 */
 999static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
1000{
1001	int dmalen;
1002	u32 max_w = max(data_width1, data_width2);
1003	u32 min_w = min(data_width1, data_width2);
1004	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
1005
1006	if (seg_max > STEDMA40_MAX_SEG_SIZE)
1007		seg_max -= max_w;
1008
1009	if (!IS_ALIGNED(size, max_w))
1010		return -EINVAL;
1011
1012	if (size <= seg_max)
1013		dmalen = 1;
1014	else {
1015		dmalen = size / seg_max;
1016		if (dmalen * seg_max < size)
1017			dmalen++;
1018	}
1019	return dmalen;
1020}
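
/*
 * Worked example (assuming STEDMA40_MAX_SEG_SIZE is 0xffff, as defined
 * in ste_dma40_ll.h): with 1-byte data widths on both sides, seg_max is
 * 65535 bytes, so a 200000-byte transfer needs
 * DIV_ROUND_UP(200000, 65535) == 4 DMA elements. A size that is not a
 * multiple of the larger data width is rejected with -EINVAL.
 */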
1021
1022static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
1023			   u32 data_width1, u32 data_width2)
1024{
1025	struct scatterlist *sg;
1026	int i;
1027	int len = 0;
1028	int ret;
1029
1030	for_each_sg(sgl, sg, sg_len, i) {
1031		ret = d40_size_2_dmalen(sg_dma_len(sg),
1032					data_width1, data_width2);
1033		if (ret < 0)
1034			return ret;
1035		len += ret;
1036	}
1037	return len;
1038}
1039
1040static int __d40_execute_command_phy(struct d40_chan *d40c,
1041				     enum d40_command command)
1042{
1043	u32 status;
1044	int i;
1045	void __iomem *active_reg;
1046	int ret = 0;
1047	unsigned long flags;
1048	u32 wmask;
1049
1050	if (command == D40_DMA_STOP) {
1051		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1052		if (ret)
1053			return ret;
1054	}
1055
1056	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1057
1058	if (d40c->phy_chan->num % 2 == 0)
1059		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1060	else
1061		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1062
1063	if (command == D40_DMA_SUSPEND_REQ) {
1064		status = (readl(active_reg) &
1065			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1066			D40_CHAN_POS(d40c->phy_chan->num);
1067
1068		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1069			goto unlock;
1070	}
1071
1072	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1073	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1074	       active_reg);
1075
1076	if (command == D40_DMA_SUSPEND_REQ) {
1077
1078		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1079			status = (readl(active_reg) &
1080				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1081				D40_CHAN_POS(d40c->phy_chan->num);
1082
1083			cpu_relax();
1084			/*
1085			 * Reduce the number of bus accesses while
1086			 * waiting for the DMA to suspend.
1087			 */
1088			udelay(3);
1089
1090			if (status == D40_DMA_STOP ||
1091			    status == D40_DMA_SUSPENDED)
1092				break;
1093		}
1094
1095		if (i == D40_SUSPEND_MAX_IT) {
1096			chan_err(d40c,
1097				"unable to suspend the chl %d (log: %d) status %x\n",
1098				d40c->phy_chan->num, d40c->log_num,
1099				status);
1100			dump_stack();
1101			ret = -EBUSY;
1102		}
1103
1104	}
1105 unlock:
1106	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1107	return ret;
1108}
1109
1110static void d40_term_all(struct d40_chan *d40c)
1111{
1112	struct d40_desc *d40d;
1113	struct d40_desc *_d;
1114
1115	/* Release completed descriptors */
1116	while ((d40d = d40_first_done(d40c))) {
1117		d40_desc_remove(d40d);
1118		d40_desc_free(d40c, d40d);
1119	}
1120
1121	/* Release active descriptors */
1122	while ((d40d = d40_first_active_get(d40c))) {
1123		d40_desc_remove(d40d);
1124		d40_desc_free(d40c, d40d);
1125	}
1126
1127	/* Release queued descriptors waiting for transfer */
1128	while ((d40d = d40_first_queued(d40c))) {
1129		d40_desc_remove(d40d);
1130		d40_desc_free(d40c, d40d);
1131	}
1132
1133	/* Release pending descriptors */
1134	while ((d40d = d40_first_pending(d40c))) {
1135		d40_desc_remove(d40d);
1136		d40_desc_free(d40c, d40d);
1137	}
1138
1139	/* Release client owned descriptors */
1140	if (!list_empty(&d40c->client))
1141		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1142			d40_desc_remove(d40d);
1143			d40_desc_free(d40c, d40d);
1144		}
1145
1146	/* Release descriptors in prepare queue */
1147	if (!list_empty(&d40c->prepare_queue))
1148		list_for_each_entry_safe(d40d, _d,
1149					 &d40c->prepare_queue, node) {
1150			d40_desc_remove(d40d);
1151			d40_desc_free(d40c, d40d);
1152		}
1153
1154	d40c->pending_tx = 0;
1155}
1156
1157static void __d40_config_set_event(struct d40_chan *d40c,
1158				   enum d40_events event_type, u32 event,
1159				   int reg)
1160{
1161	void __iomem *addr = chan_base(d40c) + reg;
1162	int tries;
1163	u32 status;
1164
1165	switch (event_type) {
1166
1167	case D40_DEACTIVATE_EVENTLINE:
1168
1169		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1170		       | ~D40_EVENTLINE_MASK(event), addr);
1171		break;
1172
1173	case D40_SUSPEND_REQ_EVENTLINE:
1174		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1175			  D40_EVENTLINE_POS(event);
1176
1177		if (status == D40_DEACTIVATE_EVENTLINE ||
1178		    status == D40_SUSPEND_REQ_EVENTLINE)
1179			break;
1180
1181		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1182		       | ~D40_EVENTLINE_MASK(event), addr);
1183
1184		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1185
1186			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1187				  D40_EVENTLINE_POS(event);
1188
1189			cpu_relax();
1190			/*
1191			 * Reduce the number of bus accesses while
1192			 * waiting for the DMA to suspend.
1193			 */
1194			udelay(3);
1195
1196			if (status == D40_DEACTIVATE_EVENTLINE)
1197				break;
1198		}
1199
1200		if (tries == D40_SUSPEND_MAX_IT) {
1201			chan_err(d40c,
 1202				"unable to stop the event_line chl %d (log: %d) "
 1203				"status %x\n", d40c->phy_chan->num,
1204				 d40c->log_num, status);
1205		}
1206		break;
1207
1208	case D40_ACTIVATE_EVENTLINE:
1209	/*
1210	 * The hardware sometimes doesn't register the enable when src and dst
1211	 * event lines are active on the same logical channel.  Retry to ensure
1212	 * it does.  Usually only one retry is sufficient.
1213	 */
1214		tries = 100;
1215		while (--tries) {
1216			writel((D40_ACTIVATE_EVENTLINE <<
1217				D40_EVENTLINE_POS(event)) |
1218				~D40_EVENTLINE_MASK(event), addr);
1219
1220			if (readl(addr) & D40_EVENTLINE_MASK(event))
1221				break;
1222		}
1223
1224		if (tries != 99)
1225			dev_dbg(chan2dev(d40c),
1226				"[%s] workaround enable S%cLNK (%d tries)\n",
1227				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1228				100 - tries);
1229
1230		WARN_ON(!tries);
1231		break;
1232
1233	case D40_ROUND_EVENTLINE:
1234		BUG();
1235		break;
1236
1237	}
1238}
1239
1240static void d40_config_set_event(struct d40_chan *d40c,
1241				 enum d40_events event_type)
1242{
1243	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1244
1245	/* Enable event line connected to device (or memcpy) */
1246	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1247	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1248		__d40_config_set_event(d40c, event_type, event,
1249				       D40_CHAN_REG_SSLNK);
1250
1251	if (d40c->dma_cfg.dir !=  DMA_DEV_TO_MEM)
1252		__d40_config_set_event(d40c, event_type, event,
1253				       D40_CHAN_REG_SDLNK);
1254}
1255
1256static u32 d40_chan_has_events(struct d40_chan *d40c)
1257{
1258	void __iomem *chanbase = chan_base(d40c);
1259	u32 val;
1260
1261	val = readl(chanbase + D40_CHAN_REG_SSLNK);
1262	val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1263
1264	return val;
1265}
1266
1267static int
1268__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1269{
1270	unsigned long flags;
1271	int ret = 0;
1272	u32 active_status;
1273	void __iomem *active_reg;
1274
1275	if (d40c->phy_chan->num % 2 == 0)
1276		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1277	else
1278		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1279
1280
1281	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1282
1283	switch (command) {
1284	case D40_DMA_STOP:
1285	case D40_DMA_SUSPEND_REQ:
1286
1287		active_status = (readl(active_reg) &
1288				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1289				 D40_CHAN_POS(d40c->phy_chan->num);
1290
1291		if (active_status == D40_DMA_RUN)
1292			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1293		else
1294			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1295
1296		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1297			ret = __d40_execute_command_phy(d40c, command);
1298
1299		break;
1300
1301	case D40_DMA_RUN:
1302
1303		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1304		ret = __d40_execute_command_phy(d40c, command);
1305		break;
1306
1307	case D40_DMA_SUSPENDED:
1308		BUG();
1309		break;
1310	}
1311
1312	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1313	return ret;
1314}
1315
1316static int d40_channel_execute_command(struct d40_chan *d40c,
1317				       enum d40_command command)
1318{
1319	if (chan_is_logical(d40c))
1320		return __d40_execute_command_log(d40c, command);
1321	else
1322		return __d40_execute_command_phy(d40c, command);
1323}
1324
1325static u32 d40_get_prmo(struct d40_chan *d40c)
1326{
1327	static const unsigned int phy_map[] = {
1328		[STEDMA40_PCHAN_BASIC_MODE]
1329			= D40_DREG_PRMO_PCHAN_BASIC,
1330		[STEDMA40_PCHAN_MODULO_MODE]
1331			= D40_DREG_PRMO_PCHAN_MODULO,
1332		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
1333			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1334	};
1335	static const unsigned int log_map[] = {
1336		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1337			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1338		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1339			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1340		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1341			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1342	};
1343
1344	if (chan_is_physical(d40c))
1345		return phy_map[d40c->dma_cfg.mode_opt];
1346	else
1347		return log_map[d40c->dma_cfg.mode_opt];
1348}
1349
1350static void d40_config_write(struct d40_chan *d40c)
1351{
1352	u32 addr_base;
1353	u32 var;
1354
1355	/* Odd addresses are even addresses + 4 */
1356	addr_base = (d40c->phy_chan->num % 2) * 4;
1357	/* Setup channel mode to logical or physical */
1358	var = ((u32)(chan_is_logical(d40c)) + 1) <<
1359		D40_CHAN_POS(d40c->phy_chan->num);
1360	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1361
1362	/* Setup operational mode option register */
1363	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1364
1365	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1366
1367	if (chan_is_logical(d40c)) {
1368		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1369			   & D40_SREG_ELEM_LOG_LIDX_MASK;
1370		void __iomem *chanbase = chan_base(d40c);
1371
1372		/* Set default config for CFG reg */
1373		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1374		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1375
1376		/* Set LIDX for lcla */
1377		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1378		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1379
1380		/* Clear LNK which will be used by d40_chan_has_events() */
1381		writel(0, chanbase + D40_CHAN_REG_SSLNK);
1382		writel(0, chanbase + D40_CHAN_REG_SDLNK);
1383	}
1384}
1385
1386static u32 d40_residue(struct d40_chan *d40c)
1387{
1388	u32 num_elt;
1389
1390	if (chan_is_logical(d40c))
1391		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1392			>> D40_MEM_LCSP2_ECNT_POS;
1393	else {
1394		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1395		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1396			  >> D40_SREG_ELEM_PHY_ECNT_POS;
1397	}
1398
1399	return num_elt * d40c->dma_cfg.dst_info.data_width;
1400}
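
/*
 * Illustrative example: the ECNT field counts the elements left to
 * transfer, so a paused channel reporting 100 outstanding elements with
 * a destination data width of DMA_SLAVE_BUSWIDTH_4_BYTES yields a
 * residue of 100 * 4 == 400 bytes.
 */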
1401
1402static bool d40_tx_is_linked(struct d40_chan *d40c)
1403{
1404	bool is_link;
1405
1406	if (chan_is_logical(d40c))
1407		is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
1408	else
1409		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1410			  & D40_SREG_LNK_PHYS_LNK_MASK;
1411
1412	return is_link;
1413}
1414
1415static int d40_pause(struct dma_chan *chan)
1416{
1417	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1418	int res = 0;
1419	unsigned long flags;
1420
1421	if (d40c->phy_chan == NULL) {
1422		chan_err(d40c, "Channel is not allocated!\n");
1423		return -EINVAL;
1424	}
1425
1426	if (!d40c->busy)
1427		return 0;
1428
1429	spin_lock_irqsave(&d40c->lock, flags);
1430	pm_runtime_get_sync(d40c->base->dev);
1431
1432	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1433
1434	pm_runtime_mark_last_busy(d40c->base->dev);
1435	pm_runtime_put_autosuspend(d40c->base->dev);
1436	spin_unlock_irqrestore(&d40c->lock, flags);
1437	return res;
1438}
1439
1440static int d40_resume(struct dma_chan *chan)
1441{
1442	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1443	int res = 0;
1444	unsigned long flags;
1445
1446	if (d40c->phy_chan == NULL) {
1447		chan_err(d40c, "Channel is not allocated!\n");
1448		return -EINVAL;
1449	}
1450
1451	if (!d40c->busy)
1452		return 0;
1453
1454	spin_lock_irqsave(&d40c->lock, flags);
1455	pm_runtime_get_sync(d40c->base->dev);
1456
 1457	/* If there are bytes left to transfer, or a linked tx, resume the job */
1458	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1459		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1460
1461	pm_runtime_mark_last_busy(d40c->base->dev);
1462	pm_runtime_put_autosuspend(d40c->base->dev);
1463	spin_unlock_irqrestore(&d40c->lock, flags);
1464	return res;
1465}
1466
1467static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1468{
1469	struct d40_chan *d40c = container_of(tx->chan,
1470					     struct d40_chan,
1471					     chan);
1472	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1473	unsigned long flags;
1474	dma_cookie_t cookie;
1475
1476	spin_lock_irqsave(&d40c->lock, flags);
1477	cookie = dma_cookie_assign(tx);
1478	d40_desc_queue(d40c, d40d);
1479	spin_unlock_irqrestore(&d40c->lock, flags);
1480
1481	return cookie;
1482}
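
/*
 * Typical client-side usage sketch (generic dmaengine API, not specific
 * to this driver): submitting a prepared descriptor lands in
 * d40_tx_submit() above, which only assigns a cookie and queues the job;
 * nothing starts until the client issues pending transfers:
 *
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */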
1483
1484static int d40_start(struct d40_chan *d40c)
1485{
1486	return d40_channel_execute_command(d40c, D40_DMA_RUN);
1487}
1488
1489static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1490{
1491	struct d40_desc *d40d;
1492	int err;
1493
1494	/* Start queued jobs, if any */
1495	d40d = d40_first_queued(d40c);
1496
1497	if (d40d != NULL) {
1498		if (!d40c->busy) {
1499			d40c->busy = true;
1500			pm_runtime_get_sync(d40c->base->dev);
1501		}
1502
1503		/* Remove from queue */
1504		d40_desc_remove(d40d);
1505
1506		/* Add to active queue */
1507		d40_desc_submit(d40c, d40d);
1508
1509		/* Initiate DMA job */
1510		d40_desc_load(d40c, d40d);
1511
1512		/* Start dma job */
1513		err = d40_start(d40c);
1514
1515		if (err)
1516			return NULL;
1517	}
1518
1519	return d40d;
1520}
1521
1522/* called from interrupt context */
1523static void dma_tc_handle(struct d40_chan *d40c)
1524{
1525	struct d40_desc *d40d;
1526
1527	/* Get first active entry from list */
1528	d40d = d40_first_active_get(d40c);
1529
1530	if (d40d == NULL)
1531		return;
1532
1533	if (d40d->cyclic) {
1534		/*
 1535		 * If this was a partially loaded list, we need to reload
 1536		 * it, but only once the list has completed.  We need to check
1537		 * for done because the interrupt will hit for every link, and
1538		 * not just the last one.
1539		 */
1540		if (d40d->lli_current < d40d->lli_len
1541		    && !d40_tx_is_linked(d40c)
1542		    && !d40_residue(d40c)) {
1543			d40_lcla_free_all(d40c, d40d);
1544			d40_desc_load(d40c, d40d);
1545			(void) d40_start(d40c);
1546
1547			if (d40d->lli_current == d40d->lli_len)
1548				d40d->lli_current = 0;
1549		}
1550	} else {
1551		d40_lcla_free_all(d40c, d40d);
1552
1553		if (d40d->lli_current < d40d->lli_len) {
1554			d40_desc_load(d40c, d40d);
1555			/* Start dma job */
1556			(void) d40_start(d40c);
1557			return;
1558		}
1559
1560		if (d40_queue_start(d40c) == NULL) {
1561			d40c->busy = false;
1562
1563			pm_runtime_mark_last_busy(d40c->base->dev);
1564			pm_runtime_put_autosuspend(d40c->base->dev);
1565		}
1566
1567		d40_desc_remove(d40d);
1568		d40_desc_done(d40c, d40d);
1569	}
1570
1571	d40c->pending_tx++;
1572	tasklet_schedule(&d40c->tasklet);
1573
1574}
1575
1576static void dma_tasklet(unsigned long data)
1577{
1578	struct d40_chan *d40c = (struct d40_chan *) data;
1579	struct d40_desc *d40d;
1580	unsigned long flags;
1581	bool callback_active;
1582	struct dmaengine_desc_callback cb;
1583
1584	spin_lock_irqsave(&d40c->lock, flags);
1585
1586	/* Get first entry from the done list */
1587	d40d = d40_first_done(d40c);
1588	if (d40d == NULL) {
 1589		/* Check if we have reached here for a cyclic job */
1590		d40d = d40_first_active_get(d40c);
1591		if (d40d == NULL || !d40d->cyclic)
1592			goto check_pending_tx;
1593	}
1594
1595	if (!d40d->cyclic)
1596		dma_cookie_complete(&d40d->txd);
1597
1598	/*
 1599	 * When terminating a channel, pending_tx is set to zero.
 1600	 * This prevents any finished active jobs from being returned to the client.
1601	 */
1602	if (d40c->pending_tx == 0) {
1603		spin_unlock_irqrestore(&d40c->lock, flags);
1604		return;
1605	}
1606
1607	/* Callback to client */
1608	callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1609	dmaengine_desc_get_callback(&d40d->txd, &cb);
1610
1611	if (!d40d->cyclic) {
1612		if (async_tx_test_ack(&d40d->txd)) {
1613			d40_desc_remove(d40d);
1614			d40_desc_free(d40c, d40d);
1615		} else if (!d40d->is_in_client_list) {
1616			d40_desc_remove(d40d);
1617			d40_lcla_free_all(d40c, d40d);
1618			list_add_tail(&d40d->node, &d40c->client);
1619			d40d->is_in_client_list = true;
1620		}
1621	}
1622
1623	d40c->pending_tx--;
1624
1625	if (d40c->pending_tx)
1626		tasklet_schedule(&d40c->tasklet);
1627
1628	spin_unlock_irqrestore(&d40c->lock, flags);
1629
1630	if (callback_active)
1631		dmaengine_desc_callback_invoke(&cb, NULL);
1632
1633	return;
1634 check_pending_tx:
 1635	/* Rescue manoeuvre if receiving double interrupts */
1636	if (d40c->pending_tx > 0)
1637		d40c->pending_tx--;
1638	spin_unlock_irqrestore(&d40c->lock, flags);
1639}
1640
1641static irqreturn_t d40_handle_interrupt(int irq, void *data)
1642{
1643	int i;
1644	u32 idx;
1645	u32 row;
1646	long chan = -1;
1647	struct d40_chan *d40c;
1648	unsigned long flags;
1649	struct d40_base *base = data;
1650	u32 *regs = base->regs_interrupt;
1651	struct d40_interrupt_lookup *il = base->gen_dmac.il;
1652	u32 il_size = base->gen_dmac.il_size;
1653
1654	spin_lock_irqsave(&base->interrupt_lock, flags);
1655
1656	/* Read interrupt status of both logical and physical channels */
1657	for (i = 0; i < il_size; i++)
1658		regs[i] = readl(base->virtbase + il[i].src);
1659
1660	for (;;) {
1661
1662		chan = find_next_bit((unsigned long *)regs,
1663				     BITS_PER_LONG * il_size, chan + 1);
1664
1665		/* No more set bits found? */
1666		if (chan == BITS_PER_LONG * il_size)
1667			break;
1668
1669		row = chan / BITS_PER_LONG;
1670		idx = chan & (BITS_PER_LONG - 1);
1671
1672		if (il[row].offset == D40_PHY_CHAN)
1673			d40c = base->lookup_phy_chans[idx];
1674		else
1675			d40c = base->lookup_log_chans[il[row].offset + idx];
1676
1677		if (!d40c) {
1678			/*
1679			 * No error because this can happen if something else
1680			 * in the system is using the channel.
1681			 */
1682			continue;
1683		}
1684
1685		/* ACK interrupt */
1686		writel(BIT(idx), base->virtbase + il[row].clr);
1687
1688		spin_lock(&d40c->lock);
1689
1690		if (!il[row].is_error)
1691			dma_tc_handle(d40c);
1692		else
1693			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1694				chan, il[row].offset, idx);
1695
1696		spin_unlock(&d40c->lock);
1697	}
1698
1699	spin_unlock_irqrestore(&base->interrupt_lock, flags);
1700
1701	return IRQ_HANDLED;
1702}
1703
1704static int d40_validate_conf(struct d40_chan *d40c,
1705			     struct stedma40_chan_cfg *conf)
1706{
1707	int res = 0;
1708	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1709
1710	if (!conf->dir) {
1711		chan_err(d40c, "Invalid direction.\n");
1712		res = -EINVAL;
1713	}
1714
1715	if ((is_log && conf->dev_type > d40c->base->num_log_chans)  ||
1716	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1717	    (conf->dev_type < 0)) {
1718		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1719		res = -EINVAL;
1720	}
1721
1722	if (conf->dir == DMA_DEV_TO_DEV) {
1723		/*
1724		 * DMAC HW supports it. Will be added to this driver,
1725		 * in case any dma client requires it.
1726		 */
1727		chan_err(d40c, "periph to periph not supported\n");
1728		res = -EINVAL;
1729	}
1730
1731	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1732	    conf->src_info.data_width !=
1733	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1734	    conf->dst_info.data_width) {
1735		/*
1736		 * The DMAC hardware only supports
1737		 * src (burst x width) == dst (burst x width)
1738		 */
1739
1740		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1741		res = -EINVAL;
1742	}
1743
1744	return res;
1745}
1746
1747static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1748			       bool is_src, int log_event_line, bool is_log,
1749			       bool *first_user)
1750{
1751	unsigned long flags;
1752	spin_lock_irqsave(&phy->lock, flags);
1753
1754	*first_user = ((phy->allocated_src | phy->allocated_dst)
1755			== D40_ALLOC_FREE);
1756
1757	if (!is_log) {
1758		/* Physical interrupts are masked per physical full channel */
1759		if (phy->allocated_src == D40_ALLOC_FREE &&
1760		    phy->allocated_dst == D40_ALLOC_FREE) {
1761			phy->allocated_dst = D40_ALLOC_PHY;
1762			phy->allocated_src = D40_ALLOC_PHY;
1763			goto found_unlock;
1764		} else
1765			goto not_found_unlock;
1766	}
1767
1768	/* Logical channel */
1769	if (is_src) {
1770		if (phy->allocated_src == D40_ALLOC_PHY)
1771			goto not_found_unlock;
1772
1773		if (phy->allocated_src == D40_ALLOC_FREE)
1774			phy->allocated_src = D40_ALLOC_LOG_FREE;
1775
1776		if (!(phy->allocated_src & BIT(log_event_line))) {
1777			phy->allocated_src |= BIT(log_event_line);
1778			goto found_unlock;
1779		} else
1780			goto not_found_unlock;
1781	} else {
1782		if (phy->allocated_dst == D40_ALLOC_PHY)
1783			goto not_found_unlock;
1784
1785		if (phy->allocated_dst == D40_ALLOC_FREE)
1786			phy->allocated_dst = D40_ALLOC_LOG_FREE;
1787
1788		if (!(phy->allocated_dst & BIT(log_event_line))) {
1789			phy->allocated_dst |= BIT(log_event_line);
1790			goto found_unlock;
1791		}
1792	}
1793 not_found_unlock:
1794	spin_unlock_irqrestore(&phy->lock, flags);
1795	return false;
1796 found_unlock:
1797	spin_unlock_irqrestore(&phy->lock, flags);
1798	return true;
1799}
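
/*
 * Illustrative allocation walk-through: on a completely free channel,
 * allocated_src == D40_ALLOC_FREE (BIT(31)). Claiming logical src event
 * line 5 first rewrites it to D40_ALLOC_LOG_FREE (0) and then sets
 * BIT(5), so a second claim of event line 5 is refused while other event
 * lines can still be added to the same physical channel.
 */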
1800
1801static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1802			       int log_event_line)
1803{
1804	unsigned long flags;
1805	bool is_free = false;
1806
1807	spin_lock_irqsave(&phy->lock, flags);
1808	if (!log_event_line) {
1809		phy->allocated_dst = D40_ALLOC_FREE;
1810		phy->allocated_src = D40_ALLOC_FREE;
1811		is_free = true;
1812		goto unlock;
1813	}
1814
1815	/* Logical channel */
1816	if (is_src) {
1817		phy->allocated_src &= ~BIT(log_event_line);
1818		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1819			phy->allocated_src = D40_ALLOC_FREE;
1820	} else {
1821		phy->allocated_dst &= ~BIT(log_event_line);
1822		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1823			phy->allocated_dst = D40_ALLOC_FREE;
1824	}
1825
1826	is_free = ((phy->allocated_src | phy->allocated_dst) ==
1827		   D40_ALLOC_FREE);
1828 unlock:
1829	spin_unlock_irqrestore(&phy->lock, flags);
1830
1831	return is_free;
1832}
1833
1834static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1835{
1836	int dev_type = d40c->dma_cfg.dev_type;
1837	int event_group;
1838	int event_line;
1839	struct d40_phy_res *phys;
1840	int i;
1841	int j;
1842	int log_num;
1843	int num_phy_chans;
1844	bool is_src;
1845	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1846
1847	phys = d40c->base->phy_res;
1848	num_phy_chans = d40c->base->num_phy_chans;
1849
1850	if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1851		log_num = 2 * dev_type;
1852		is_src = true;
1853	} else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1854		   d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1855		/* dst event lines are used for logical memcpy */
1856		log_num = 2 * dev_type + 1;
1857		is_src = false;
1858	} else
1859		return -EINVAL;
1860
1861	event_group = D40_TYPE_TO_GROUP(dev_type);
1862	event_line = D40_TYPE_TO_EVENT(dev_type);
1863
1864	if (!is_log) {
1865		if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1866			/* Find physical half channel */
1867			if (d40c->dma_cfg.use_fixed_channel) {
1868				i = d40c->dma_cfg.phy_channel;
1869				if (d40_alloc_mask_set(&phys[i], is_src,
1870						       0, is_log,
1871						       first_phy_user))
1872					goto found_phy;
1873			} else {
1874				for (i = 0; i < num_phy_chans; i++) {
1875					if (d40_alloc_mask_set(&phys[i], is_src,
1876						       0, is_log,
1877						       first_phy_user))
1878						goto found_phy;
1879				}
1880			}
1881		} else
1882			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1883			int phy_num = j + event_group * 2;
1884				for (i = phy_num; i < phy_num + 2; i++) {
1885					if (d40_alloc_mask_set(&phys[i],
1886							       is_src,
1887							       0,
1888							       is_log,
1889							       first_phy_user))
1890						goto found_phy;
1891				}
1892			}
1893		return -EINVAL;
1894found_phy:
1895		d40c->phy_chan = &phys[i];
1896		d40c->log_num = D40_PHY_CHAN;
1897		goto out;
1898	}
1899	if (dev_type == -1)
1900		return -EINVAL;
1901
1902	/* Find logical channel */
1903	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1904		int phy_num = j + event_group * 2;
1905
1906		if (d40c->dma_cfg.use_fixed_channel) {
1907			i = d40c->dma_cfg.phy_channel;
1908
1909			if ((i != phy_num) && (i != phy_num + 1)) {
1910				dev_err(chan2dev(d40c),
1911					"invalid fixed phy channel %d\n", i);
1912				return -EINVAL;
1913			}
1914
1915			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1916					       is_log, first_phy_user))
1917				goto found_log;
1918
1919			dev_err(chan2dev(d40c),
1920				"could not allocate fixed phy channel %d\n", i);
1921			return -EINVAL;
1922		}
1923
1924		/*
1925		 * Spread logical channels across all available physical
1926		 * channels rather than packing every logical channel onto
1927		 * the first available one.
1928		 */
1929		if (is_src) {
1930			for (i = phy_num; i < phy_num + 2; i++) {
1931				if (d40_alloc_mask_set(&phys[i], is_src,
1932						       event_line, is_log,
1933						       first_phy_user))
1934					goto found_log;
1935			}
1936		} else {
1937			for (i = phy_num + 1; i >= phy_num; i--) {
1938				if (d40_alloc_mask_set(&phys[i], is_src,
1939						       event_line, is_log,
1940						       first_phy_user))
1941					goto found_log;
1942			}
1943		}
1944	}
1945	return -EINVAL;
1946
1947found_log:
1948	d40c->phy_chan = &phys[i];
1949	d40c->log_num = log_num;
1950out:
1951
1952	if (is_log)
1953		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1954	else
1955		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1956
1957	return 0;
1958
1959}
1960
1961static int d40_config_memcpy(struct d40_chan *d40c)
1962{
1963	dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1964
1965	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1966		d40c->dma_cfg = dma40_memcpy_conf_log;
1967		d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1968
1969		d40_log_cfg(&d40c->dma_cfg,
1970			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1971
1972	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
1973		   dma_has_cap(DMA_SLAVE, cap)) {
1974		d40c->dma_cfg = dma40_memcpy_conf_phy;
1975
1976		/* Generate interrupt at end of transfer or relink. */
1977		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1978
1979		/* Generate interrupt on error. */
1980		d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1981		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1982
1983	} else {
1984		chan_err(d40c, "No memcpy\n");
1985		return -EINVAL;
1986	}
1987
1988	return 0;
1989}
1990
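/*
 * Tear a channel down: stop the hardware, release the allocation mask
 * and lookup table entry, and drop the runtime PM reference that was
 * held while the channel was busy.
 */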
1991static int d40_free_dma(struct d40_chan *d40c)
1992{
1993
1994	int res = 0;
1995	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1996	struct d40_phy_res *phy = d40c->phy_chan;
1997	bool is_src;
1998
1999	/* Terminate all queued and active transfers */
2000	d40_term_all(d40c);
2001
2002	if (phy == NULL) {
2003		chan_err(d40c, "phy == null\n");
2004		return -EINVAL;
2005	}
2006
2007	if (phy->allocated_src == D40_ALLOC_FREE &&
2008	    phy->allocated_dst == D40_ALLOC_FREE) {
2009		chan_err(d40c, "channel already free\n");
2010		return -EINVAL;
2011	}
2012
2013	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2014	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2015		is_src = false;
2016	else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2017		is_src = true;
2018	else {
2019		chan_err(d40c, "Unknown direction\n");
2020		return -EINVAL;
2021	}
2022
2023	pm_runtime_get_sync(d40c->base->dev);
2024	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2025	if (res) {
2026		chan_err(d40c, "stop failed\n");
2027		goto mark_last_busy;
2028	}
2029
2030	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2031
2032	if (chan_is_logical(d40c))
2033		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2034	else
2035		d40c->base->lookup_phy_chans[phy->num] = NULL;
2036
2037	if (d40c->busy) {
2038		pm_runtime_mark_last_busy(d40c->base->dev);
2039		pm_runtime_put_autosuspend(d40c->base->dev);
2040	}
2041
2042	d40c->busy = false;
2043	d40c->phy_chan = NULL;
2044	d40c->configured = false;
2045 mark_last_busy:
2046	pm_runtime_mark_last_busy(d40c->base->dev);
2047	pm_runtime_put_autosuspend(d40c->base->dev);
2048	return res;
2049}
2050
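/*
 * A physical channel counts as paused when its status field in the
 * ACTIVE/ACTIVO register reads SUSPENDED or STOPPED; a logical channel
 * when its event line in SSLNK/SDLNK is not in the RUN state.
 */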
2051static bool d40_is_paused(struct d40_chan *d40c)
2052{
2053	void __iomem *chanbase = chan_base(d40c);
2054	bool is_paused = false;
2055	unsigned long flags;
2056	void __iomem *active_reg;
2057	u32 status;
2058	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2059
2060	spin_lock_irqsave(&d40c->lock, flags);
2061
2062	if (chan_is_physical(d40c)) {
2063		if (d40c->phy_chan->num % 2 == 0)
2064			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2065		else
2066			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2067
2068		status = (readl(active_reg) &
2069			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2070			D40_CHAN_POS(d40c->phy_chan->num);
2071		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2072			is_paused = true;
2073		goto unlock;
2074	}
2075
2076	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2077	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2078		status = readl(chanbase + D40_CHAN_REG_SDLNK);
2079	} else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2080		status = readl(chanbase + D40_CHAN_REG_SSLNK);
2081	} else {
2082		chan_err(d40c, "Unknown direction\n");
2083		goto unlock;
2084	}
2085
2086	status = (status & D40_EVENTLINE_MASK(event)) >>
2087		D40_EVENTLINE_POS(event);
2088
2089	if (status != D40_DMA_RUN)
2090		is_paused = true;
2091 unlock:
2092	spin_unlock_irqrestore(&d40c->lock, flags);
2093	return is_paused;
2094
2095}
2096
2097static u32 stedma40_residue(struct dma_chan *chan)
2098{
2099	struct d40_chan *d40c =
2100		container_of(chan, struct d40_chan, chan);
2101	u32 bytes_left;
2102	unsigned long flags;
2103
2104	spin_lock_irqsave(&d40c->lock, flags);
2105	bytes_left = d40_residue(d40c);
2106	spin_unlock_irqrestore(&d40c->lock, flags);
2107
2108	return bytes_left;
2109}
2110
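/*
 * Convert the src and dst scatterlists of a transfer into logical
 * channel LLIs. Both data widths are passed to each conversion so the
 * element counts on the two sides can be kept consistent.
 */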
2111static int
2112d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2113		struct scatterlist *sg_src, struct scatterlist *sg_dst,
2114		unsigned int sg_len, dma_addr_t src_dev_addr,
2115		dma_addr_t dst_dev_addr)
2116{
2117	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2118	struct stedma40_half_channel_info *src_info = &cfg->src_info;
2119	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2120	int ret;
2121
2122	ret = d40_log_sg_to_lli(sg_src, sg_len,
2123				src_dev_addr,
2124				desc->lli_log.src,
2125				chan->log_def.lcsp1,
2126				src_info->data_width,
2127				dst_info->data_width);
2128	if (ret < 0)
		return ret;

2129	ret = d40_log_sg_to_lli(sg_dst, sg_len,
2130				dst_dev_addr,
2131				desc->lli_log.dst,
2132				chan->log_def.lcsp3,
2133				dst_info->data_width,
2134				src_info->data_width);
2135
2136	return ret < 0 ? ret : 0;
2137}
2138
2139static int
2140d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2141		struct scatterlist *sg_src, struct scatterlist *sg_dst,
2142		unsigned int sg_len, dma_addr_t src_dev_addr,
2143		dma_addr_t dst_dev_addr)
2144{
2145	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2146	struct stedma40_half_channel_info *src_info = &cfg->src_info;
2147	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2148	unsigned long flags = 0;
2149	int ret;
2150
2151	if (desc->cyclic)
2152		flags |= LLI_CYCLIC | LLI_TERM_INT;
2153
2154	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2155				desc->lli_phy.src,
2156				virt_to_phys(desc->lli_phy.src),
2157				chan->src_def_cfg,
2158				src_info, dst_info, flags);
2159	if (ret < 0)
		return ret;

2160	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2161				desc->lli_phy.dst,
2162				virt_to_phys(desc->lli_phy.dst),
2163				chan->dst_def_cfg,
2164				dst_info, src_info, flags);
2165
2166	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2167				   desc->lli_pool.size, DMA_TO_DEVICE);
2168
2169	return ret < 0 ? ret : 0;
2170}
2171
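/*
 * Common descriptor setup: work out how many LLIs the scatterlist
 * needs, allocate the LLI pool and initialize the dmaengine
 * descriptor. Returns NULL on any failure.
 */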
2172static struct d40_desc *
2173d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2174	      unsigned int sg_len, unsigned long dma_flags)
2175{
2176	struct stedma40_chan_cfg *cfg;
2177	struct d40_desc *desc;
2178	int ret;
2179
2180	desc = d40_desc_get(chan);
2181	if (!desc)
2182		return NULL;
2183
2184	cfg = &chan->dma_cfg;
2185	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2186					cfg->dst_info.data_width);
2187	if (desc->lli_len < 0) {
2188		chan_err(chan, "Unaligned size\n");
2189		goto free_desc;
2190	}
2191
2192	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2193	if (ret < 0) {
2194		chan_err(chan, "Could not allocate lli\n");
2195		goto free_desc;
2196	}
2197
2198	desc->lli_current = 0;
2199	desc->txd.flags = dma_flags;
2200	desc->txd.tx_submit = d40_tx_submit;
2201
2202	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2203
2204	return desc;
2205 free_desc:
2206	d40_desc_free(chan, desc);
2207	return NULL;
2208}
2209
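/*
 * Build a transfer descriptor from src/dst scatterlists. A scatterlist
 * whose last entry chains back to the first is treated as a cyclic
 * transfer. The slave config is written out here because the transfer
 * direction is only known at prepare time.
 */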
2210static struct dma_async_tx_descriptor *
2211d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2212	    struct scatterlist *sg_dst, unsigned int sg_len,
2213	    enum dma_transfer_direction direction, unsigned long dma_flags)
2214{
2215	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2216	dma_addr_t src_dev_addr;
2217	dma_addr_t dst_dev_addr;
2218	struct d40_desc *desc;
2219	unsigned long flags;
2220	int ret;
2221
2222	if (!chan->phy_chan) {
2223		chan_err(chan, "Cannot prepare unallocated channel\n");
2224		return NULL;
2225	}
2226
2227	d40_set_runtime_config_write(dchan, &chan->slave_config, direction);
2228
2229	spin_lock_irqsave(&chan->lock, flags);
2230
2231	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2232	if (desc == NULL)
2233		goto unlock;
2234
2235	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2236		desc->cyclic = true;
2237
2238	src_dev_addr = 0;
2239	dst_dev_addr = 0;
2240	if (direction == DMA_DEV_TO_MEM)
2241		src_dev_addr = chan->runtime_addr;
2242	else if (direction == DMA_MEM_TO_DEV)
2243		dst_dev_addr = chan->runtime_addr;
2244
2245	if (chan_is_logical(chan))
2246		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2247				      sg_len, src_dev_addr, dst_dev_addr);
2248	else
2249		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2250				      sg_len, src_dev_addr, dst_dev_addr);
2251
2252	if (ret) {
2253		chan_err(chan, "Failed to prepare %s sg job: %d\n",
2254			 chan_is_logical(chan) ? "log" : "phy", ret);
2255		goto free_desc;
2256	}
2257
2258	/*
2259	 * Add the descriptor to the prepare queue so that it can be
2260	 * freed later in terminate_all.
2261	 */
2262	list_add_tail(&desc->node, &chan->prepare_queue);
2263
2264	spin_unlock_irqrestore(&chan->lock, flags);
2265
2266	return &desc->txd;
2267 free_desc:
2268	d40_desc_free(chan, desc);
2269 unlock:
2270	spin_unlock_irqrestore(&chan->lock, flags);
2271	return NULL;
2272}
2273
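/*
 * dmaengine filter function: accept the channel if the supplied
 * stedma40_chan_cfg validates against it, or, when no config is given,
 * if the channel can be set up for memcpy.
 */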
2274bool stedma40_filter(struct dma_chan *chan, void *data)
2275{
2276	struct stedma40_chan_cfg *info = data;
2277	struct d40_chan *d40c =
2278		container_of(chan, struct d40_chan, chan);
2279	int err;
2280
2281	if (data) {
2282		err = d40_validate_conf(d40c, info);
2283		if (!err)
2284			d40c->dma_cfg = *info;
2285	} else
2286		err = d40_config_memcpy(d40c);
2287
2288	if (!err)
2289		d40c->configured = true;
2290
2291	return err == 0;
2292}
2293EXPORT_SYMBOL(stedma40_filter);
2294
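/*
 * Program the realtime and high-priority enable/clear registers for
 * one event line, honouring the erratum described below.
 */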
2295static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2296{
2297	bool realtime = d40c->dma_cfg.realtime;
2298	bool highprio = d40c->dma_cfg.high_priority;
2299	u32 rtreg;
2300	u32 event = D40_TYPE_TO_EVENT(dev_type);
2301	u32 group = D40_TYPE_TO_GROUP(dev_type);
2302	u32 bit = BIT(event);
2303	u32 prioreg;
2304	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2305
2306	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2307	/*
2308	 * Due to a hardware bug, in some cases a logical channel triggered by
2309	 * a high priority destination event line can generate extra packet
2310	 * transactions.
2311	 *
2312	 * The workaround is to not set the high priority level for the
2313	 * destination event lines that trigger logical channels.
2314	 */
2315	if (!src && chan_is_logical(d40c))
2316		highprio = false;
2317
2318	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2319
2320	/* Destination event lines are stored in the upper halfword */
2321	if (!src)
2322		bit <<= 16;
2323
2324	writel(bit, d40c->base->virtbase + prioreg + group * 4);
2325	writel(bit, d40c->base->virtbase + rtreg + group * 4);
2326}
2327
2328static void d40_set_prio_realtime(struct d40_chan *d40c)
2329{
2330	if (d40c->base->rev < 3)
2331		return;
2332
2333	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2334	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2335		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2336
2337	if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2338	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2339		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2340}
2341
2342#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
2343#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
2344#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2345#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2346#define D40_DT_FLAGS_HIGH_PRIO(flags)  ((flags >> 4) & 0x1)
2347
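/*
 * Translate a DT dma-spec into a channel. The three cells are
 * <dev_type phy_channel flags>; as an illustrative (not normative)
 * example, a client specifying <&dma 13 0 0x6> would request device
 * type 13 on any channel, logical mode, device-to-memory, big-endian.
 */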
2348static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2349				  struct of_dma *ofdma)
2350{
2351	struct stedma40_chan_cfg cfg;
2352	dma_cap_mask_t cap;
2353	u32 flags;
2354
2355	memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2356
2357	dma_cap_zero(cap);
2358	dma_cap_set(DMA_SLAVE, cap);
2359
2360	cfg.dev_type = dma_spec->args[0];
2361	flags = dma_spec->args[2];
2362
2363	switch (D40_DT_FLAGS_MODE(flags)) {
2364	case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2365	case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2366	}
2367
2368	switch (D40_DT_FLAGS_DIR(flags)) {
2369	case 0:
2370		cfg.dir = DMA_MEM_TO_DEV;
2371		cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2372		break;
2373	case 1:
2374		cfg.dir = DMA_DEV_TO_MEM;
2375		cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2376		break;
2377	}
2378
2379	if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2380		cfg.phy_channel = dma_spec->args[1];
2381		cfg.use_fixed_channel = true;
2382	}
2383
2384	if (D40_DT_FLAGS_HIGH_PRIO(flags))
2385		cfg.high_priority = true;
2386
2387	return dma_request_channel(cap, stedma40_filter, &cfg);
2388}
2389
2390/* DMA ENGINE functions */
2391static int d40_alloc_chan_resources(struct dma_chan *chan)
2392{
2393	int err;
2394	unsigned long flags;
2395	struct d40_chan *d40c =
2396		container_of(chan, struct d40_chan, chan);
2397	bool is_free_phy;
2398	spin_lock_irqsave(&d40c->lock, flags);
2399
2400	dma_cookie_init(chan);
2401
2402	/* If no DMA configuration is set, use the default configuration (memcpy) */
2403	if (!d40c->configured) {
2404		err = d40_config_memcpy(d40c);
2405		if (err) {
2406			chan_err(d40c, "Failed to configure memcpy channel\n");
2407			goto mark_last_busy;
2408		}
2409	}
2410
2411	err = d40_allocate_channel(d40c, &is_free_phy);
2412	if (err) {
2413		chan_err(d40c, "Failed to allocate channel\n");
2414		d40c->configured = false;
2415		goto mark_last_busy;
2416	}
2417
2418	pm_runtime_get_sync(d40c->base->dev);
2419
2420	d40_set_prio_realtime(d40c);
2421
2422	if (chan_is_logical(d40c)) {
2423		if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2424			d40c->lcpa = d40c->base->lcpa_base +
2425				d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2426		else
2427			d40c->lcpa = d40c->base->lcpa_base +
2428				d40c->dma_cfg.dev_type *
2429				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2430
2431		/* Unmask the Global Interrupt Mask. */
2432		d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2433		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2434	}
2435
2436	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2437		 chan_is_logical(d40c) ? "logical" : "physical",
2438		 d40c->phy_chan->num,
2439		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2440
2441
2442	/*
2443	 * Only write the channel configuration to the DMA hardware if the
2444	 * physical resource is free. With multiple logical channels on the
2445	 * same physical resource, only the first write is necessary.
2446	 */
2447	if (is_free_phy)
2448		d40_config_write(d40c);
2449 mark_last_busy:
2450	pm_runtime_mark_last_busy(d40c->base->dev);
2451	pm_runtime_put_autosuspend(d40c->base->dev);
2452	spin_unlock_irqrestore(&d40c->lock, flags);
2453	return err;
2454}
2455
2456static void d40_free_chan_resources(struct dma_chan *chan)
2457{
2458	struct d40_chan *d40c =
2459		container_of(chan, struct d40_chan, chan);
2460	int err;
2461	unsigned long flags;
2462
2463	if (d40c->phy_chan == NULL) {
2464		chan_err(d40c, "Cannot free unallocated channel\n");
2465		return;
2466	}
2467
2468	spin_lock_irqsave(&d40c->lock, flags);
2469
2470	err = d40_free_dma(d40c);
2471
2472	if (err)
2473		chan_err(d40c, "Failed to free channel\n");
2474	spin_unlock_irqrestore(&d40c->lock, flags);
2475}
2476
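/*
 * memcpy is implemented by wrapping src and dst in single-entry
 * scatterlists and handing them to the common d40_prep_sg() path.
 */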
2477static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2478						       dma_addr_t dst,
2479						       dma_addr_t src,
2480						       size_t size,
2481						       unsigned long dma_flags)
2482{
2483	struct scatterlist dst_sg;
2484	struct scatterlist src_sg;
2485
2486	sg_init_table(&dst_sg, 1);
2487	sg_init_table(&src_sg, 1);
2488
2489	sg_dma_address(&dst_sg) = dst;
2490	sg_dma_address(&src_sg) = src;
2491
2492	sg_dma_len(&dst_sg) = size;
2493	sg_dma_len(&src_sg) = size;
2494
2495	return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
2496			   DMA_MEM_TO_MEM, dma_flags);
2497}
2498
2499static struct dma_async_tx_descriptor *
2500d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2501		  unsigned int sg_len, enum dma_transfer_direction direction,
2502		  unsigned long dma_flags, void *context)
2503{
2504	if (!is_slave_direction(direction))
2505		return NULL;
2506
2507	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2508}
2509
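/*
 * A cyclic transfer is built as a scatterlist with one entry per
 * period, chained back onto itself so that d40_prep_sg() detects the
 * wrap and marks the descriptor cyclic.
 */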
2510static struct dma_async_tx_descriptor *
2511dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2512		     size_t buf_len, size_t period_len,
2513		     enum dma_transfer_direction direction, unsigned long flags)
2514{
2515	unsigned int periods = buf_len / period_len;
2516	struct dma_async_tx_descriptor *txd;
2517	struct scatterlist *sg;
2518	int i;
2519
2520	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2521	if (!sg)
2522		return NULL;
2523
2524	for (i = 0; i < periods; i++) {
2525		sg_dma_address(&sg[i]) = dma_addr;
2526		sg_dma_len(&sg[i]) = period_len;
2527		dma_addr += period_len;
2528	}
2529
2530	sg_chain(sg, periods + 1, sg);
2531
2532	txd = d40_prep_sg(chan, sg, sg, periods, direction,
2533			  DMA_PREP_INTERRUPT);
2534
2535	kfree(sg);
2536
2537	return txd;
2538}
2539
2540static enum dma_status d40_tx_status(struct dma_chan *chan,
2541				     dma_cookie_t cookie,
2542				     struct dma_tx_state *txstate)
2543{
2544	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2545	enum dma_status ret;
2546
2547	if (d40c->phy_chan == NULL) {
2548		chan_err(d40c, "Cannot read status of unallocated channel\n");
2549		return -EINVAL;
2550	}
2551
2552	ret = dma_cookie_status(chan, cookie, txstate);
2553	if (ret != DMA_COMPLETE && txstate)
2554		dma_set_residue(txstate, stedma40_residue(chan));
2555
2556	if (d40_is_paused(d40c))
2557		ret = DMA_PAUSED;
2558
2559	return ret;
2560}
2561
2562static void d40_issue_pending(struct dma_chan *chan)
2563{
2564	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2565	unsigned long flags;
2566
2567	if (d40c->phy_chan == NULL) {
2568		chan_err(d40c, "Channel is not allocated!\n");
2569		return;
2570	}
2571
2572	spin_lock_irqsave(&d40c->lock, flags);
2573
2574	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2575
2576	/* Busy means that queued jobs are already being processed */
2577	if (!d40c->busy)
2578		(void) d40_queue_start(d40c);
2579
2580	spin_unlock_irqrestore(&d40c->lock, flags);
2581}
2582
2583static int d40_terminate_all(struct dma_chan *chan)
2584{
2585	unsigned long flags;
2586	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2587	int ret;
2588
2589	if (d40c->phy_chan == NULL) {
2590		chan_err(d40c, "Channel is not allocated!\n");
2591		return -EINVAL;
2592	}
2593
2594	spin_lock_irqsave(&d40c->lock, flags);
2595
2596	pm_runtime_get_sync(d40c->base->dev);
2597	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2598	if (ret)
2599		chan_err(d40c, "Failed to stop channel\n");
2600
2601	d40_term_all(d40c);
2602	pm_runtime_mark_last_busy(d40c->base->dev);
2603	pm_runtime_put_autosuspend(d40c->base->dev);
2604	if (d40c->busy) {
2605		pm_runtime_mark_last_busy(d40c->base->dev);
2606		pm_runtime_put_autosuspend(d40c->base->dev);
2607	}
2608	d40c->busy = false;
2609
2610	spin_unlock_irqrestore(&d40c->lock, flags);
2611	return 0;
2612}
2613
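/*
 * Map a dmaengine maxburst value onto the largest supported PSIZE
 * burst setting that does not exceed it, for the channel's mode.
 */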
2614static int
2615dma40_config_to_halfchannel(struct d40_chan *d40c,
2616			    struct stedma40_half_channel_info *info,
2617			    u32 maxburst)
2618{
2619	int psize;
2620
2621	if (chan_is_logical(d40c)) {
2622		if (maxburst >= 16)
2623			psize = STEDMA40_PSIZE_LOG_16;
2624		else if (maxburst >= 8)
2625			psize = STEDMA40_PSIZE_LOG_8;
2626		else if (maxburst >= 4)
2627			psize = STEDMA40_PSIZE_LOG_4;
2628		else
2629			psize = STEDMA40_PSIZE_LOG_1;
2630	} else {
2631		if (maxburst >= 16)
2632			psize = STEDMA40_PSIZE_PHY_16;
2633		else if (maxburst >= 8)
2634			psize = STEDMA40_PSIZE_PHY_8;
2635		else if (maxburst >= 4)
2636			psize = STEDMA40_PSIZE_PHY_4;
2637		else
2638			psize = STEDMA40_PSIZE_PHY_1;
2639	}
2640
2641	info->psize = psize;
2642	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2643
2644	return 0;
2645}
2646
2647static int d40_set_runtime_config(struct dma_chan *chan,
2648				  struct dma_slave_config *config)
2649{
2650	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2651
2652	memcpy(&d40c->slave_config, config, sizeof(*config));
2653
2654	return 0;
2655}
2656
2657/* Runtime reconfiguration extension */
2658static int d40_set_runtime_config_write(struct dma_chan *chan,
2659				  struct dma_slave_config *config,
2660				  enum dma_transfer_direction direction)
2661{
2662	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2663	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2664	enum dma_slave_buswidth src_addr_width, dst_addr_width;
2665	dma_addr_t config_addr;
2666	u32 src_maxburst, dst_maxburst;
2667	int ret;
2668
2669	if (d40c->phy_chan == NULL) {
2670		chan_err(d40c, "Channel is not allocated!\n");
2671		return -EINVAL;
2672	}
2673
2674	src_addr_width = config->src_addr_width;
2675	src_maxburst = config->src_maxburst;
2676	dst_addr_width = config->dst_addr_width;
2677	dst_maxburst = config->dst_maxburst;
2678
2679	if (direction == DMA_DEV_TO_MEM) {
2680		config_addr = config->src_addr;
2681
2682		if (cfg->dir != DMA_DEV_TO_MEM)
2683			dev_dbg(d40c->base->dev,
2684				"channel was not configured for peripheral "
2685				"to memory transfer (%d) overriding\n",
2686				cfg->dir);
2687		cfg->dir = DMA_DEV_TO_MEM;
2688
2689		/* Configure the memory side */
2690		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2691			dst_addr_width = src_addr_width;
2692		if (dst_maxburst == 0)
2693			dst_maxburst = src_maxburst;
2694
2695	} else if (direction == DMA_MEM_TO_DEV) {
2696		config_addr = config->dst_addr;
2697
2698		if (cfg->dir != DMA_MEM_TO_DEV)
2699			dev_dbg(d40c->base->dev,
2700				"channel was not configured for memory "
2701				"to peripheral transfer (%d) overriding\n",
2702				cfg->dir);
2703		cfg->dir = DMA_MEM_TO_DEV;
2704
2705		/* Configure the memory side */
2706		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2707			src_addr_width = dst_addr_width;
2708		if (src_maxburst == 0)
2709			src_maxburst = dst_maxburst;
2710	} else {
2711		dev_err(d40c->base->dev,
2712			"unrecognized channel direction %d\n",
2713			direction);
2714		return -EINVAL;
2715	}
2716
2717	if (config_addr <= 0) {
2718		dev_err(d40c->base->dev, "no address supplied\n");
2719		return -EINVAL;
2720	}
2721
2722	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2723		dev_err(d40c->base->dev,
2724			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2725			src_maxburst,
2726			src_addr_width,
2727			dst_maxburst,
2728			dst_addr_width);
2729		return -EINVAL;
2730	}
2731
2732	if (src_maxburst > 16) {
2733		src_maxburst = 16;
2734		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2735	} else if (dst_maxburst > 16) {
2736		dst_maxburst = 16;
2737		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2738	}
2739
2740	/* The only valid widths are 1, 2, 4 and 8 bytes. */
2741	if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2742	    src_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2743	    dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2744	    dst_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2745	    !is_power_of_2(src_addr_width) ||
2746	    !is_power_of_2(dst_addr_width))
2747		return -EINVAL;
2748
2749	cfg->src_info.data_width = src_addr_width;
2750	cfg->dst_info.data_width = dst_addr_width;
2751
2752	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2753					  src_maxburst);
2754	if (ret)
2755		return ret;
2756
2757	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2758					  dst_maxburst);
2759	if (ret)
2760		return ret;
2761
2762	/* Fill in register values */
2763	if (chan_is_logical(d40c))
2764		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2765	else
2766		d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2767
2768	/* These settings will take precedence later */
2769	d40c->runtime_addr = config_addr;
2770	d40c->runtime_direction = direction;
2771	dev_dbg(d40c->base->dev,
2772		"configured channel %s for %s, data width %d/%d, "
2773		"maxburst %d/%d elements, LE, no flow control\n",
2774		dma_chan_name(chan),
2775		(direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2776		src_addr_width, dst_addr_width,
2777		src_maxburst, dst_maxburst);
2778
2779	return 0;
2780}
2781
2782/* Initialization functions */
2783
2784static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2785				 struct d40_chan *chans, int offset,
2786				 int num_chans)
2787{
2788	int i = 0;
2789	struct d40_chan *d40c;
2790
2791	INIT_LIST_HEAD(&dma->channels);
2792
2793	for (i = offset; i < offset + num_chans; i++) {
2794		d40c = &chans[i];
2795		d40c->base = base;
2796		d40c->chan.device = dma;
2797
2798		spin_lock_init(&d40c->lock);
2799
2800		d40c->log_num = D40_PHY_CHAN;
2801
2802		INIT_LIST_HEAD(&d40c->done);
2803		INIT_LIST_HEAD(&d40c->active);
2804		INIT_LIST_HEAD(&d40c->queue);
2805		INIT_LIST_HEAD(&d40c->pending_queue);
2806		INIT_LIST_HEAD(&d40c->client);
2807		INIT_LIST_HEAD(&d40c->prepare_queue);
2808
2809		tasklet_init(&d40c->tasklet, dma_tasklet,
2810			     (unsigned long) d40c);
2811
2812		list_add_tail(&d40c->chan.device_node,
2813			      &dma->channels);
2814	}
2815}
2816
2817static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2818{
2819	if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
2820		dev->device_prep_slave_sg = d40_prep_slave_sg;
2821		dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2822	}
2823
2824	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2825		dev->device_prep_dma_memcpy = d40_prep_memcpy;
2826		dev->directions = BIT(DMA_MEM_TO_MEM);
2827		/*
2828		 * This controller can only access addresses at even
2829		 * 32-bit boundaries, i.e. 2^2 byte alignment.
2830		 */
2831		dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
2832	}
2833
2834	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2835		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2836
2837	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2838	dev->device_free_chan_resources = d40_free_chan_resources;
2839	dev->device_issue_pending = d40_issue_pending;
2840	dev->device_tx_status = d40_tx_status;
2841	dev->device_config = d40_set_runtime_config;
2842	dev->device_pause = d40_pause;
2843	dev->device_resume = d40_resume;
2844	dev->device_terminate_all = d40_terminate_all;
2845	dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2846	dev->dev = base->dev;
2847}
2848
2849static int __init d40_dmaengine_init(struct d40_base *base,
2850				     int num_reserved_chans)
2851{
2852	int err;
2853
2854	d40_chan_init(base, &base->dma_slave, base->log_chans,
2855		      0, base->num_log_chans);
2856
2857	dma_cap_zero(base->dma_slave.cap_mask);
2858	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2859	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2860
2861	d40_ops_init(base, &base->dma_slave);
2862
2863	err = dmaenginem_async_device_register(&base->dma_slave);
2864
2865	if (err) {
2866		d40_err(base->dev, "Failed to register slave channels\n");
2867		goto exit;
2868	}
2869
2870	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2871		      base->num_log_chans, base->num_memcpy_chans);
2872
2873	dma_cap_zero(base->dma_memcpy.cap_mask);
2874	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2875
2876	d40_ops_init(base, &base->dma_memcpy);
2877
2878	err = dmaenginem_async_device_register(&base->dma_memcpy);
2879
2880	if (err) {
2881		d40_err(base->dev,
2882			"Failed to register memcpy only channels\n");
2883		goto exit;
2884	}
2885
2886	d40_chan_init(base, &base->dma_both, base->phy_chans,
2887		      0, num_reserved_chans);
2888
2889	dma_cap_zero(base->dma_both.cap_mask);
2890	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2891	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2892	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2893
2894	d40_ops_init(base, &base->dma_both);
2895	err = dmaenginem_async_device_register(&base->dma_both);
2896
2897	if (err) {
2898		d40_err(base->dev,
2899			"Failed to register logical and physical capable channels\n");
2900		goto exit;
2901	}
2902	return 0;
2903 exit:
2904	return err;
2905}
2906
2907/* Suspend resume functionality */
2908#ifdef CONFIG_PM_SLEEP
2909static int dma40_suspend(struct device *dev)
2910{
2911	struct d40_base *base = dev_get_drvdata(dev);
2912	int ret;
2913
2914	ret = pm_runtime_force_suspend(dev);
2915	if (ret)
2916		return ret;
2917
2918	if (base->lcpa_regulator)
2919		ret = regulator_disable(base->lcpa_regulator);
2920	return ret;
2921}
2922
2923static int dma40_resume(struct device *dev)
2924{
2925	struct d40_base *base = dev_get_drvdata(dev);
2926	int ret = 0;
2927
2928	if (base->lcpa_regulator) {
2929		ret = regulator_enable(base->lcpa_regulator);
2930		if (ret)
2931			return ret;
2932	}
2933
2934	return pm_runtime_force_resume(dev);
2935}
2936#endif
2937
2938#ifdef CONFIG_PM
2939static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2940			 u32 *regaddr, int num, bool save)
2941{
2942	int i;
2943
2944	for (i = 0; i < num; i++) {
2945		void __iomem *addr = baseaddr + regaddr[i];
2946
2947		if (save)
2948			backup[i] = readl_relaxed(addr);
2949		else
2950			writel_relaxed(backup[i], addr);
2951	}
2952}
2953
2954static void d40_save_restore_registers(struct d40_base *base, bool save)
2955{
2956	int i;
2957
2958	/* Save/Restore channel specific registers */
2959	for (i = 0; i < base->num_phy_chans; i++) {
2960		void __iomem *addr;
2961		int idx;
2962
2963		if (base->phy_res[i].reserved)
2964			continue;
2965
2966		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2967		idx = i * ARRAY_SIZE(d40_backup_regs_chan);
2968
2969		dma40_backup(addr, &base->reg_val_backup_chan[idx],
2970			     d40_backup_regs_chan,
2971			     ARRAY_SIZE(d40_backup_regs_chan),
2972			     save);
2973	}
2974
2975	/* Save/Restore global registers */
2976	dma40_backup(base->virtbase, base->reg_val_backup,
2977		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
2978		     save);
2979
2980	/* Save/Restore registers only existing on dma40 v3 and later */
2981	if (base->gen_dmac.backup)
2982		dma40_backup(base->virtbase, base->reg_val_backup_v4,
2983			     base->gen_dmac.backup,
2984			base->gen_dmac.backup_size,
2985			save);
2986}
2987
2988static int dma40_runtime_suspend(struct device *dev)
2989{
2990	struct d40_base *base = dev_get_drvdata(dev);
2991
2992	d40_save_restore_registers(base, true);
2993
2994	/* Don't disable/enable clocks for v1 due to HW bugs */
2995	if (base->rev != 1)
2996		writel_relaxed(base->gcc_pwr_off_mask,
2997			       base->virtbase + D40_DREG_GCC);
2998
2999	return 0;
3000}
3001
3002static int dma40_runtime_resume(struct device *dev)
3003{
3004	struct d40_base *base = dev_get_drvdata(dev);
3005
3006	d40_save_restore_registers(base, false);
3007
3008	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3009		       base->virtbase + D40_DREG_GCC);
3010	return 0;
3011}
3012#endif
3013
3014static const struct dev_pm_ops dma40_pm_ops = {
3015	SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3016	SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3017				dma40_runtime_resume,
3018				NULL)
3019};
3020
3021/* Initialization functions. */
3022
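/*
 * Read the PRSME/PRSMO security registers and build the initial
 * physical resource map: secure-mode and platform-disabled channels
 * are marked reserved, the rest free. Returns the number of available
 * physical channels and records the clock gate mask for power off.
 */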
3023static int __init d40_phy_res_init(struct d40_base *base)
3024{
3025	int i;
3026	int num_phy_chans_avail = 0;
3027	u32 val[2];
3028	int odd_even_bit = -2;
3029	int gcc = D40_DREG_GCC_ENA;
3030
3031	val[0] = readl(base->virtbase + D40_DREG_PRSME);
3032	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3033
3034	for (i = 0; i < base->num_phy_chans; i++) {
3035		base->phy_res[i].num = i;
3036		odd_even_bit += 2 * ((i % 2) == 0);
3037		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3038			/* Mark security only channels as occupied */
3039			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3040			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3041			base->phy_res[i].reserved = true;
3042			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3043						       D40_DREG_GCC_SRC);
3044			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3045						       D40_DREG_GCC_DST);
3046
3047
3048		} else {
3049			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3050			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3051			base->phy_res[i].reserved = false;
3052			num_phy_chans_avail++;
3053		}
3054		spin_lock_init(&base->phy_res[i].lock);
3055	}
3056
3057	/* Mark disabled channels as occupied */
3058	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3059		int chan = base->plat_data->disabled_channels[i];
3060
3061		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3062		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3063		base->phy_res[chan].reserved = true;
3064		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3065					       D40_DREG_GCC_SRC);
3066		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3067					       D40_DREG_GCC_DST);
3068		num_phy_chans_avail--;
3069	}
3070
3071	/* Mark soft_lli channels */
3072	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3073		int chan = base->plat_data->soft_lli_chans[i];
3074
3075		base->phy_res[chan].use_soft_lli = true;
3076	}
3077
3078	dev_info(base->dev, "%d of %d physical DMA channels available\n",
3079		 num_phy_chans_avail, base->num_phy_chans);
3080
3081	/* Verify settings extended vs standard */
3082	val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3083
3084	for (i = 0; i < base->num_phy_chans; i++) {
3085
3086		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3087		    (val[0] & 0x3) != 1)
3088			dev_info(base->dev,
3089				 "[%s] INFO: channel %d is misconfigured (%d)\n",
3090				 __func__, i, val[0] & 0x3);
3091
3092		val[0] = val[0] >> 2;
3093	}
3094
3095	/*
3096	 * To keep things simple, enable all clocks initially.
3097	 * The clocks will be managed later, after channel allocation.
3098	 * The clocks for the event lines on which reserved channels exist
3099	 * are not managed here.
3100	 */
3101	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3102	base->gcc_pwr_off_mask = gcc;
3103
3104	return num_phy_chans_avail;
3105}
3106
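/*
 * Probe the controller: enable its clock, map the register window,
 * verify the AMBA PrimeCell and vendor IDs and the hardware revision,
 * then allocate and fill the d40_base structure for the detected
 * variant (v4a or v4b register layout).
 */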
3107static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3108{
3109	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3110	struct clk *clk;
3111	void __iomem *virtbase;
3112	struct resource *res;
3113	struct d40_base *base;
3114	int num_log_chans;
3115	int num_phy_chans;
3116	int num_memcpy_chans;
3117	int clk_ret = -EINVAL;
3118	int i;
3119	u32 pid;
3120	u32 cid;
3121	u8 rev;
3122
3123	clk = clk_get(&pdev->dev, NULL);
3124	if (IS_ERR(clk)) {
3125		d40_err(&pdev->dev, "No matching clock found\n");
3126		goto check_prepare_enabled;
3127	}
3128
3129	clk_ret = clk_prepare_enable(clk);
3130	if (clk_ret) {
3131		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3132		goto disable_unprepare;
3133	}
3134
3135	/* Get IO for DMAC base address */
3136	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3137	if (!res)
3138		goto disable_unprepare;
3139
3140	if (request_mem_region(res->start, resource_size(res),
3141			       D40_NAME " I/O base") == NULL)
3142		goto release_region;
3143
3144	virtbase = ioremap(res->start, resource_size(res));
3145	if (!virtbase)
3146		goto release_region;
3147
3148	/* This is just a regular AMBA PrimeCell ID actually */
3149	for (pid = 0, i = 0; i < 4; i++)
3150		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3151			& 255) << (i * 8);
3152	for (cid = 0, i = 0; i < 4; i++)
3153		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3154			& 255) << (i * 8);
3155
3156	if (cid != AMBA_CID) {
3157		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3158		goto unmap_io;
3159	}
3160	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3161		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3162			AMBA_MANF_BITS(pid),
3163			AMBA_VENDOR_ST);
3164		goto unmap_io;
3165	}
3166	/*
3167	 * HW revision:
3168	 * DB8500ed has revision 0
3169	 * ? has revision 1
3170	 * DB8500v1 has revision 2
3171	 * DB8500v2 has revision 3
3172	 * AP9540v1 has revision 4
3173	 * DB8540v1 has revision 4
3174	 */
3175	rev = AMBA_REV_BITS(pid);
3176	if (rev < 2) {
3177		d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
3178		goto unmap_io;
3179	}
3180
3181	/* The number of physical channels on this HW */
3182	if (plat_data->num_of_phy_chans)
3183		num_phy_chans = plat_data->num_of_phy_chans;
3184	else
3185		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3186
3187	/* The number of channels used for memcpy */
3188	if (plat_data->num_of_memcpy_chans)
3189		num_memcpy_chans = plat_data->num_of_memcpy_chans;
3190	else
3191		num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3192
3193	num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3194
3195	dev_info(&pdev->dev,
3196		 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3197		 rev, &res->start, num_phy_chans, num_log_chans);
3198
3199	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3200		       (num_phy_chans + num_log_chans + num_memcpy_chans) *
3201		       sizeof(struct d40_chan), GFP_KERNEL);
3202
3203	if (base == NULL)
3204		goto unmap_io;
3205
3206	base->rev = rev;
3207	base->clk = clk;
3208	base->num_memcpy_chans = num_memcpy_chans;
3209	base->num_phy_chans = num_phy_chans;
3210	base->num_log_chans = num_log_chans;
3211	base->phy_start = res->start;
3212	base->phy_size = resource_size(res);
3213	base->virtbase = virtbase;
3214	base->plat_data = plat_data;
3215	base->dev = &pdev->dev;
3216	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3217	base->log_chans = &base->phy_chans[num_phy_chans];
3218
3219	if (base->plat_data->num_of_phy_chans == 14) {
3220		base->gen_dmac.backup = d40_backup_regs_v4b;
3221		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3222		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3223		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3224		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3225		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3226		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3227		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3228		base->gen_dmac.il = il_v4b;
3229		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3230		base->gen_dmac.init_reg = dma_init_reg_v4b;
3231		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3232	} else {
3233		if (base->rev >= 3) {
3234			base->gen_dmac.backup = d40_backup_regs_v4a;
3235			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3236		}
3237		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3238		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3239		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3240		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3241		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3242		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3243		base->gen_dmac.il = il_v4a;
3244		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3245		base->gen_dmac.init_reg = dma_init_reg_v4a;
3246		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3247	}
3248
3249	base->phy_res = kcalloc(num_phy_chans,
3250				sizeof(*base->phy_res),
3251				GFP_KERNEL);
3252	if (!base->phy_res)
3253		goto free_base;
3254
3255	base->lookup_phy_chans = kcalloc(num_phy_chans,
3256					 sizeof(*base->lookup_phy_chans),
3257					 GFP_KERNEL);
3258	if (!base->lookup_phy_chans)
3259		goto free_phy_res;
3260
3261	base->lookup_log_chans = kcalloc(num_log_chans,
3262					 sizeof(*base->lookup_log_chans),
3263					 GFP_KERNEL);
3264	if (!base->lookup_log_chans)
3265		goto free_phy_chans;
3266
3267	base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3268						  sizeof(d40_backup_regs_chan),
3269						  GFP_KERNEL);
3270	if (!base->reg_val_backup_chan)
3271		goto free_log_chans;
3272
3273	base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3274					    * D40_LCLA_LINK_PER_EVENT_GRP,
3275					    sizeof(*base->lcla_pool.alloc_map),
3276					    GFP_KERNEL);
3277	if (!base->lcla_pool.alloc_map)
3278		goto free_backup_chan;
3279
3280	base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
3281					     sizeof(*base->regs_interrupt),
3282					     GFP_KERNEL);
3283	if (!base->regs_interrupt)
3284		goto free_map;
3285
3286	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3287					    0, SLAB_HWCACHE_ALIGN,
3288					    NULL);
3289	if (base->desc_slab == NULL)
3290		goto free_regs;
3291
3292
3293	return base;
3294 free_regs:
3295	kfree(base->regs_interrupt);
3296 free_map:
3297	kfree(base->lcla_pool.alloc_map);
3298 free_backup_chan:
3299	kfree(base->reg_val_backup_chan);
3300 free_log_chans:
3301	kfree(base->lookup_log_chans);
3302 free_phy_chans:
3303	kfree(base->lookup_phy_chans);
3304 free_phy_res:
3305	kfree(base->phy_res);
3306 free_base:
3307	kfree(base);
3308 unmap_io:
3309	iounmap(virtbase);
3310 release_region:
3311	release_mem_region(res->start, resource_size(res));
3312 check_prepare_enabled:
3313	if (!clk_ret)
3314 disable_unprepare:
3315		clk_disable_unprepare(clk);
3316	if (!IS_ERR(clk))
3317		clk_put(clk);
3318	return NULL;
3319}
3320
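/*
 * Write the per-variant init registers, then put every non-reserved
 * physical channel into a known state: physical mode, stopped, with
 * its interrupt enabled and cleared.
 */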
3321static void __init d40_hw_init(struct d40_base *base)
3322{
3323
3324	int i;
3325	u32 prmseo[2] = {0, 0};
3326	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3327	u32 pcmis = 0;
3328	u32 pcicr = 0;
3329	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3330	u32 reg_size = base->gen_dmac.init_reg_size;
3331
3332	for (i = 0; i < reg_size; i++)
3333		writel(dma_init_reg[i].val,
3334		       base->virtbase + dma_init_reg[i].reg);
3335
3336	/* Configure all our dma channels to default settings */
3337	for (i = 0; i < base->num_phy_chans; i++) {
3338
3339		activeo[i % 2] = activeo[i % 2] << 2;
3340
3341		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3342		    == D40_ALLOC_PHY) {
3343			activeo[i % 2] |= 3;
3344			continue;
3345		}
3346
3347		/* Enable interrupt # */
3348		pcmis = (pcmis << 1) | 1;
3349
3350		/* Clear interrupt # */
3351		pcicr = (pcicr << 1) | 1;
3352
3353		/* Set channel to physical mode */
3354		prmseo[i % 2] = prmseo[i % 2] << 2;
3355		prmseo[i % 2] |= 1;
3356
3357	}
3358
3359	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3360	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3361	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3362	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3363
3364	/* Write which interrupt to enable */
3365	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3366
3367	/* Write which interrupt to clear */
3368	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3369
3370	/* These are __initdata and cannot be accessed after init */
3371	base->gen_dmac.init_reg = NULL;
3372	base->gen_dmac.init_reg_size = 0;
3373}
3374
3375static int __init d40_lcla_allocate(struct d40_base *base)
3376{
3377	struct d40_lcla_pool *pool = &base->lcla_pool;
3378	unsigned long *page_list;
3379	int i, j;
3380	int ret;
3381
3382	/*
3383	 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
3384	 * To fulfil this hardware requirement without wasting 256 KiB,
3385	 * we allocate pages until we get an aligned one.
3386	 */
3387	page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3388				  sizeof(*page_list),
3389				  GFP_KERNEL);
3390	if (!page_list)
3391		return -ENOMEM;
3392
3393	/* Calculate how many pages are required */
3394	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3395
3396	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3397		page_list[i] = __get_free_pages(GFP_KERNEL,
3398						base->lcla_pool.pages);
3399		if (!page_list[i]) {
3400
3401			d40_err(base->dev, "Failed to allocate %d pages.\n",
3402				base->lcla_pool.pages);
3403			ret = -ENOMEM;
3404
3405			for (j = 0; j < i; j++)
3406				free_pages(page_list[j], base->lcla_pool.pages);
3407			goto free_page_list;
3408		}
3409
3410		if ((virt_to_phys((void *)page_list[i]) &
3411		     (LCLA_ALIGNMENT - 1)) == 0)
3412			break;
3413	}
3414
3415	for (j = 0; j < i; j++)
3416		free_pages(page_list[j], base->lcla_pool.pages);
3417
3418	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3419		base->lcla_pool.base = (void *)page_list[i];
3420	} else {
3421		/*
3422		 * After many attempts with no success in finding the correct
3423		 * alignment, fall back to allocating a big buffer.
3424		 */
3425		dev_warn(base->dev,
3426			 "[%s] Failed to get %d pages @ 18 bit align.\n",
3427			 __func__, base->lcla_pool.pages);
3428		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3429							 base->num_phy_chans +
3430							 LCLA_ALIGNMENT,
3431							 GFP_KERNEL);
3432		if (!base->lcla_pool.base_unaligned) {
3433			ret = -ENOMEM;
3434			goto free_page_list;
3435		}
3436
3437		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3438						 LCLA_ALIGNMENT);
3439	}
3440
3441	pool->dma_addr = dma_map_single(base->dev, pool->base,
3442					SZ_1K * base->num_phy_chans,
3443					DMA_TO_DEVICE);
3444	if (dma_mapping_error(base->dev, pool->dma_addr)) {
3445		pool->dma_addr = 0;
3446		ret = -ENOMEM;
3447		goto free_page_list;
3448	}
3449
3450	writel(virt_to_phys(base->lcla_pool.base),
3451	       base->virtbase + D40_DREG_LCLA);
3452	ret = 0;
3453 free_page_list:
3454	kfree(page_list);
3455	return ret;
3456}
3457
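/*
 * Build platform data from the device tree: the "dma-channels",
 * "memcpy-channels" and "disabled-channels" properties.
 */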
3458static int __init d40_of_probe(struct platform_device *pdev,
3459			       struct device_node *np)
3460{
3461	struct stedma40_platform_data *pdata;
3462	int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3463	const __be32 *list;
3464
3465	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3466	if (!pdata)
3467		return -ENOMEM;
3468
3469	/* If absent, this value will be obtained from the hardware. */
3470	of_property_read_u32(np, "dma-channels", &num_phy);
3471	if (num_phy > 0)
3472		pdata->num_of_phy_chans = num_phy;
3473
3474	list = of_get_property(np, "memcpy-channels", &num_memcpy);
3475	num_memcpy /= sizeof(*list);
3476
3477	if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3478		d40_err(&pdev->dev,
3479			"Invalid number of memcpy channels specified (%d)\n",
3480			num_memcpy);
3481		return -EINVAL;
3482	}
3483	pdata->num_of_memcpy_chans = num_memcpy;
3484
3485	of_property_read_u32_array(np, "memcpy-channels",
3486				   dma40_memcpy_channels,
3487				   num_memcpy);
3488
3489	list = of_get_property(np, "disabled-channels", &num_disabled);
3490	num_disabled /= sizeof(*list);
3491
3492	if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3493		d40_err(&pdev->dev,
3494			"Invalid number of disabled channels specified (%d)\n",
3495			num_disabled);
3496		return -EINVAL;
3497	}
3498
3499	of_property_read_u32_array(np, "disabled-channels",
3500				   pdata->disabled_channels,
3501				   num_disabled);
3502	pdata->disabled_channels[num_disabled] = -1;
3503
3504	pdev->dev.platform_data = pdata;
3505
3506	return 0;
3507}
3508
3509static int __init d40_probe(struct platform_device *pdev)
3510{
3511	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3512	struct device_node *np = pdev->dev.of_node;
3513	int ret = -ENOENT;
3514	struct d40_base *base;
3515	struct resource *res;
3516	int num_reserved_chans;
3517	u32 val;
3518
3519	if (!plat_data) {
3520		if (np) {
3521			if (d40_of_probe(pdev, np)) {
3522				ret = -ENOMEM;
3523				goto report_failure;
3524			}
3525		} else {
3526			d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3527			goto report_failure;
3528		}
3529	}
3530
3531	base = d40_hw_detect_init(pdev);
3532	if (!base)
3533		goto report_failure;
3534
3535	num_reserved_chans = d40_phy_res_init(base);
3536
3537	platform_set_drvdata(pdev, base);
3538
3539	spin_lock_init(&base->interrupt_lock);
3540	spin_lock_init(&base->execmd_lock);
3541
3542	/* Get IO for logical channel parameter address */
3543	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3544	if (!res) {
3545		ret = -ENOENT;
3546		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3547		goto destroy_cache;
3548	}
3549	base->lcpa_size = resource_size(res);
3550	base->phy_lcpa = res->start;
3551
3552	if (request_mem_region(res->start, resource_size(res),
3553			       D40_NAME " I/O lcpa") == NULL) {
3554		ret = -EBUSY;
3555		d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3556		goto destroy_cache;
3557	}
3558
3559	/* We make use of ESRAM memory for this. */
3560	val = readl(base->virtbase + D40_DREG_LCPA);
3561	if (res->start != val && val != 0) {
3562		dev_warn(&pdev->dev,
3563			 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3564			 __func__, val, &res->start);
3565	} else
3566		writel(res->start, base->virtbase + D40_DREG_LCPA);
3567
3568	base->lcpa_base = ioremap(res->start, resource_size(res));
3569	if (!base->lcpa_base) {
3570		ret = -ENOMEM;
3571		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3572		goto destroy_cache;
3573	}
3574	/* If LCLA is to be located in ESRAM, we don't need to allocate it */
3575	if (base->plat_data->use_esram_lcla) {
3576		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3577							"lcla_esram");
3578		if (!res) {
3579			ret = -ENOENT;
3580			d40_err(&pdev->dev,
3581				"No \"lcla_esram\" memory resource\n");
3582			goto destroy_cache;
3583		}
3584		base->lcla_pool.base = ioremap(res->start,
3585						resource_size(res));
3586		if (!base->lcla_pool.base) {
3587			ret = -ENOMEM;
3588			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3589			goto destroy_cache;
3590		}
3591		writel(res->start, base->virtbase + D40_DREG_LCLA);
3592
3593	} else {
3594		ret = d40_lcla_allocate(base);
3595		if (ret) {
3596			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3597			goto destroy_cache;
3598		}
3599	}
3600
3601	spin_lock_init(&base->lcla_pool.lock);
3602
3603	base->irq = platform_get_irq(pdev, 0);
3604
3605	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3606	if (ret) {
3607		d40_err(&pdev->dev, "No IRQ defined\n");
3608		goto destroy_cache;
3609	}
3610
3611	if (base->plat_data->use_esram_lcla) {
3612
3613		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3614		if (IS_ERR(base->lcpa_regulator)) {
3615			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3616			ret = PTR_ERR(base->lcpa_regulator);
3617			base->lcpa_regulator = NULL;
3618			goto destroy_cache;
3619		}
3620
3621		ret = regulator_enable(base->lcpa_regulator);
3622		if (ret) {
3623			d40_err(&pdev->dev,
3624				"Failed to enable lcpa_regulator\n");
3625			regulator_put(base->lcpa_regulator);
3626			base->lcpa_regulator = NULL;
3627			goto destroy_cache;
3628		}
3629	}
3630
3631	writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3632
3633	pm_runtime_irq_safe(base->dev);
3634	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3635	pm_runtime_use_autosuspend(base->dev);
3636	pm_runtime_mark_last_busy(base->dev);
3637	pm_runtime_set_active(base->dev);
3638	pm_runtime_enable(base->dev);
3639
3640	ret = d40_dmaengine_init(base, num_reserved_chans);
3641	if (ret)
3642		goto destroy_cache;
3643
3644	base->dev->dma_parms = &base->dma_parms;
3645	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3646	if (ret) {
3647		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3648		goto destroy_cache;
3649	}
3650
3651	d40_hw_init(base);
3652
3653	if (np) {
3654		ret = of_dma_controller_register(np, d40_xlate, NULL);
3655		if (ret)
3656			dev_err(&pdev->dev,
3657				"could not register of_dma_controller\n");
3658	}
3659
3660	dev_info(base->dev, "initialized\n");
3661	return 0;
3662 destroy_cache:
3663	kmem_cache_destroy(base->desc_slab);
3664	if (base->virtbase)
3665		iounmap(base->virtbase);
3666
3667	if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3668		iounmap(base->lcla_pool.base);
3669		base->lcla_pool.base = NULL;
3670	}
3671
3672	if (base->lcla_pool.dma_addr)
3673		dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3674				 SZ_1K * base->num_phy_chans,
3675				 DMA_TO_DEVICE);
3676
3677	if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3678		free_pages((unsigned long)base->lcla_pool.base,
3679			   base->lcla_pool.pages);
3680
3681	kfree(base->lcla_pool.base_unaligned);
3682
3683	if (base->phy_lcpa)
3684		release_mem_region(base->phy_lcpa,
3685				   base->lcpa_size);
3686	if (base->phy_start)
3687		release_mem_region(base->phy_start,
3688				   base->phy_size);
3689	if (base->clk) {
3690		clk_disable_unprepare(base->clk);
3691		clk_put(base->clk);
3692	}
3693
3694	if (base->lcpa_regulator) {
3695		regulator_disable(base->lcpa_regulator);
3696		regulator_put(base->lcpa_regulator);
3697	}
3698
3699	kfree(base->lcla_pool.alloc_map);
3700	kfree(base->lookup_log_chans);
3701	kfree(base->lookup_phy_chans);
3702	kfree(base->phy_res);
3703	kfree(base);
3704 report_failure:
3705	d40_err(&pdev->dev, "probe failed\n");
3706	return ret;
3707}
3708
3709static const struct of_device_id d40_match[] = {
3710	{ .compatible = "stericsson,dma40", },
3711	{}
3712};
3713
3714static struct platform_driver d40_driver = {
3715	.driver = {
3716		.name  = D40_NAME,
3717		.pm = &dma40_pm_ops,
3718		.of_match_table = d40_match,
3719	},
3720};
3721
3722static int __init stedma40_init(void)
3723{
3724	return platform_driver_probe(&d40_driver, d40_probe);
3725}
3726subsys_initcall(stedma40_init);
v4.10.11
 
   1/*
   2 * Copyright (C) Ericsson AB 2007-2008
   3 * Copyright (C) ST-Ericsson SA 2008-2010
   4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
   5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
   6 * License terms: GNU General Public License (GPL) version 2
   7 */
   8
   9#include <linux/dma-mapping.h>
  10#include <linux/kernel.h>
  11#include <linux/slab.h>
  12#include <linux/export.h>
  13#include <linux/dmaengine.h>
  14#include <linux/platform_device.h>
  15#include <linux/clk.h>
  16#include <linux/delay.h>
  17#include <linux/log2.h>
  18#include <linux/pm.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/err.h>
  21#include <linux/of.h>
  22#include <linux/of_dma.h>
  23#include <linux/amba/bus.h>
  24#include <linux/regulator/consumer.h>
  25#include <linux/platform_data/dma-ste-dma40.h>
  26
  27#include "dmaengine.h"
  28#include "ste_dma40_ll.h"
  29
  30#define D40_NAME "dma40"
  31
  32#define D40_PHY_CHAN -1
  33
  34/* For masking out/in 2 bit channel positions */
  35#define D40_CHAN_POS(chan)  (2 * (chan / 2))
  36#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
  37
  38/* Maximum iterations taken before giving up suspending a channel */
  39#define D40_SUSPEND_MAX_IT 500
  40
  41/* Milliseconds */
  42#define DMA40_AUTOSUSPEND_DELAY	100
  43
  44/* Hardware requirement on LCLA alignment */
  45#define LCLA_ALIGNMENT 0x40000
  46
  47/* Max number of links per event group */
  48#define D40_LCLA_LINK_PER_EVENT_GRP 128
  49#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
  50
  51/* Max number of logical channels per physical channel */
  52#define D40_MAX_LOG_CHAN_PER_PHY 32
  53
  54/* Attempts before giving up to trying to get pages that are aligned */
  55#define MAX_LCLA_ALLOC_ATTEMPTS 256
  56
  57/* Bit markings for allocation map */
  58#define D40_ALLOC_FREE		BIT(31)
  59#define D40_ALLOC_PHY		BIT(30)
  60#define D40_ALLOC_LOG_FREE	0
  61
  62#define D40_MEMCPY_MAX_CHANS	8
  63
  64/* Reserved event lines for memcpy only. */
  65#define DB8500_DMA_MEMCPY_EV_0	51
  66#define DB8500_DMA_MEMCPY_EV_1	56
  67#define DB8500_DMA_MEMCPY_EV_2	57
  68#define DB8500_DMA_MEMCPY_EV_3	58
  69#define DB8500_DMA_MEMCPY_EV_4	59
  70#define DB8500_DMA_MEMCPY_EV_5	60
  71
  72static int dma40_memcpy_channels[] = {
  73	DB8500_DMA_MEMCPY_EV_0,
  74	DB8500_DMA_MEMCPY_EV_1,
  75	DB8500_DMA_MEMCPY_EV_2,
  76	DB8500_DMA_MEMCPY_EV_3,
  77	DB8500_DMA_MEMCPY_EV_4,
  78	DB8500_DMA_MEMCPY_EV_5,
  79};
  80
  81/* Default configuration for physical memcpy */
  82static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
  83	.mode = STEDMA40_MODE_PHYSICAL,
  84	.dir = DMA_MEM_TO_MEM,
  85
  86	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
  87	.src_info.psize = STEDMA40_PSIZE_PHY_1,
  88	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
  89
  90	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
  91	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
  92	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
  93};
  94
  95/* Default configuration for logical memcpy */
  96static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
  97	.mode = STEDMA40_MODE_LOGICAL,
  98	.dir = DMA_MEM_TO_MEM,
  99
 100	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 101	.src_info.psize = STEDMA40_PSIZE_LOG_1,
 102	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 103
 104	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 105	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
 106	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 107};
 108
 109/**
 110 * enum d40_command - The different commands and/or statuses.
 111 *
 112 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 113 * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
 114 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 115 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 116 */
 117enum d40_command {
 118	D40_DMA_STOP		= 0,
 119	D40_DMA_RUN		= 1,
 120	D40_DMA_SUSPEND_REQ	= 2,
 121	D40_DMA_SUSPENDED	= 3
 122};
 123
 124/*
 125 * enum d40_events - The different Event Enables for the event lines.
 126 *
 127 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 128 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 129 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 130 * @D40_ROUND_EVENTLINE: Status check for event line.
 131 */
 132
 133enum d40_events {
 134	D40_DEACTIVATE_EVENTLINE	= 0,
 135	D40_ACTIVATE_EVENTLINE		= 1,
 136	D40_SUSPEND_REQ_EVENTLINE	= 2,
 137	D40_ROUND_EVENTLINE		= 3
 138};
 139
 140/*
 141 * These are the registers that have to be saved and later restored
 142 * when the DMA hw is powered off.
 143 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 144 */
 145static u32 d40_backup_regs[] = {
 146	D40_DREG_LCPA,
 147	D40_DREG_LCLA,
 148	D40_DREG_PRMSE,
 149	D40_DREG_PRMSO,
 150	D40_DREG_PRMOE,
 151	D40_DREG_PRMOO,
 152};
 153
 154#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
 155
 156/*
 157 * Since the 9540 and 8540 share the same HW revision,
 158 * use v4a for the 9540 or earlier and
 159 * use v4b for the 8540 or later.
 160 * HW revision:
 161 * DB8500ed has revision 0
 162 * DB8500v1 has revision 2
 163 * DB8500v2 has revision 3
 164 * AP9540v1 has revision 4
 165 * DB8540v1 has revision 4
 166 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 167 */
 168static u32 d40_backup_regs_v4a[] = {
 169	D40_DREG_PSEG1,
 170	D40_DREG_PSEG2,
 171	D40_DREG_PSEG3,
 172	D40_DREG_PSEG4,
 173	D40_DREG_PCEG1,
 174	D40_DREG_PCEG2,
 175	D40_DREG_PCEG3,
 176	D40_DREG_PCEG4,
 177	D40_DREG_RSEG1,
 178	D40_DREG_RSEG2,
 179	D40_DREG_RSEG3,
 180	D40_DREG_RSEG4,
 181	D40_DREG_RCEG1,
 182	D40_DREG_RCEG2,
 183	D40_DREG_RCEG3,
 184	D40_DREG_RCEG4,
 185};
 186
 187#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
 188
 189static u32 d40_backup_regs_v4b[] = {
 190	D40_DREG_CPSEG1,
 191	D40_DREG_CPSEG2,
 192	D40_DREG_CPSEG3,
 193	D40_DREG_CPSEG4,
 194	D40_DREG_CPSEG5,
 195	D40_DREG_CPCEG1,
 196	D40_DREG_CPCEG2,
 197	D40_DREG_CPCEG3,
 198	D40_DREG_CPCEG4,
 199	D40_DREG_CPCEG5,
 200	D40_DREG_CRSEG1,
 201	D40_DREG_CRSEG2,
 202	D40_DREG_CRSEG3,
 203	D40_DREG_CRSEG4,
 204	D40_DREG_CRSEG5,
 205	D40_DREG_CRCEG1,
 206	D40_DREG_CRCEG2,
 207	D40_DREG_CRCEG3,
 208	D40_DREG_CRCEG4,
 209	D40_DREG_CRCEG5,
 210};
 211
 212#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 213
 214static u32 d40_backup_regs_chan[] = {
 215	D40_CHAN_REG_SSCFG,
 216	D40_CHAN_REG_SSELT,
 217	D40_CHAN_REG_SSPTR,
 218	D40_CHAN_REG_SSLNK,
 219	D40_CHAN_REG_SDCFG,
 220	D40_CHAN_REG_SDELT,
 221	D40_CHAN_REG_SDPTR,
 222	D40_CHAN_REG_SDLNK,
 223};
 224
 225#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
 226			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
 227
 228/**
 229 * struct d40_interrupt_lookup - lookup table for interrupt handler
 230 *
 231 * @src: Interrupt mask register.
 232 * @clr: Interrupt clear register.
 233 * @is_error: true if this is an error interrupt.
 234 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 235 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 236 */
 237struct d40_interrupt_lookup {
 238	u32 src;
 239	u32 clr;
 240	bool is_error;
 241	int offset;
 242};
 243
 244
 245static struct d40_interrupt_lookup il_v4a[] = {
 246	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
 247	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
 248	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
 249	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
 250	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
 251	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
 252	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
 253	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
 254	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
 255	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
 256};
 257
 258static struct d40_interrupt_lookup il_v4b[] = {
 259	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,  0},
 260	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
 261	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
 262	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
 263	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
 264	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,   0},
 265	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,  32},
 266	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,  64},
 267	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,  96},
 268	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
 269	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
 270	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
 271};
 272
 273/**
 274 * struct d40_reg_val - simple lookup struct
 275 *
 276 * @reg: The register.
 277 * @val: The value that belongs to the register in reg.
 278 */
 279struct d40_reg_val {
 280	unsigned int reg;
 281	unsigned int val;
 282};
 283
 284static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
 285	/* Clock every part of the DMA block from start */
 286	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
 287
 288	/* Interrupts on all logical channels */
 289	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
 290	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
 291	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
 292	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
 293	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
 294	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
 295	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
 296	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
 297	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
 298	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
 299	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
 300	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
 301};
 302static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
 303	/* Clock every part of the DMA block from start */
 304	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
 305
 306	/* Interrupts on all logical channels */
 307	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
 308	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
 309	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
 310	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
 311	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
 312	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
 313	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
 314	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
 315	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
 316	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
 317	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
 318	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
 319	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
 320	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
 321	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
 322};
 323
 324/**
 325 * struct d40_lli_pool - Structure for keeping LLIs in memory
 326 *
 327 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 328 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 329 * pre_alloc_lli is used.
 330 * @dma_addr: DMA address, if mapped
 331 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 332 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 333 * one buffer to one buffer.
 334 */
 335struct d40_lli_pool {
 336	void	*base;
 337	int	 size;
 338	dma_addr_t	dma_addr;
 339	/* Space for dst and src, plus an extra for padding */
 340	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
 341};
 342
 343/**
 344 * struct d40_desc - A descriptor is one DMA job.
 345 *
 346 * @lli_phy: LLI settings for physical channel. Both src and dst
 347 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 348 * lli_len equals one.
 349 * @lli_log: Same as above but for logical channels.
 350 * @lli_pool: The pool with two entries pre-allocated.
 351 * @lli_len: Number of llis of current descriptor.
 352 * @lli_current: Number of transferred llis.
 353 * @lcla_alloc: Number of LCLA entries allocated.
 354 * @txd: DMA engine struct. Used, among other things, for communication
 355 * during a transfer.
 356 * @node: List entry.
 357 * @is_in_client_list: true if the client owns this descriptor.
 358 * @cyclic: true if this is a cyclic job
 359 *
 360 * This descriptor is used for both logical and physical transfers.
 361 */
 362struct d40_desc {
 363	/* LLI physical */
 364	struct d40_phy_lli_bidir	 lli_phy;
 365	/* LLI logical */
 366	struct d40_log_lli_bidir	 lli_log;
 367
 368	struct d40_lli_pool		 lli_pool;
 369	int				 lli_len;
 370	int				 lli_current;
 371	int				 lcla_alloc;
 372
 373	struct dma_async_tx_descriptor	 txd;
 374	struct list_head		 node;
 375
 376	bool				 is_in_client_list;
 377	bool				 cyclic;
 378};
 379
 380/**
 381 * struct d40_lcla_pool - LCLA pool settings and data.
 382 *
 383 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address of the LCLA area, if mapped.
 384 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 385 * This pointer is only there for clean-up on error.
 386 * @pages: The number of pages needed for all physical channels.
 387 * Only used later for clean-up on error
 388 * @lock: Lock to protect the content in this struct.
 389 * @alloc_map: Big map of which LCLA entry is owned by which job.
 390 */
 391struct d40_lcla_pool {
 392	void		*base;
 393	dma_addr_t	dma_addr;
 394	void		*base_unaligned;
 395	int		 pages;
 396	spinlock_t	 lock;
 397	struct d40_desc	**alloc_map;
 398};
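/*
 * Ownership note, inferred from the clean-up path in probe: when the page
 * allocator hands back an area already aligned to LCLA_ALIGNMENT (0x40000,
 * i.e. 256 KiB), base points straight at it and base_unaligned stays NULL;
 * when the kmalloc fallback is used instead, base_unaligned keeps the raw
 * pointer for kfree() while base is the aligned pointer inside it.
 */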
 399
 400/**
 401 * struct d40_phy_res - struct for handling eventlines mapped to physical
 402 * channels.
 403 *
 404 * @lock: A lock protecting this entity.
 405 * @reserved: True if used by secure world or otherwise.
 406 * @num: The physical channel number of this entity.
 407 * @allocated_src: Bit mapped to show which src event lines are mapped to
 408 * this physical channel. Can also be free or physically allocated.
 409 * @allocated_dst: Same as for src but for dst.
 410 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 411 * the event line number.
 412 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 413 */
 414struct d40_phy_res {
 415	spinlock_t lock;
 416	bool	   reserved;
 417	int	   num;
 418	u32	   allocated_src;
 419	u32	   allocated_dst;
 420	bool	   use_soft_lli;
 421};
 422
 423struct d40_base;
 424
 425/**
 426 * struct d40_chan - Struct that describes a channel.
 427 *
 428 * @lock: A spinlock to protect this struct.
 429 * @log_num: The logical number, if any, of this channel.
 430 * @pending_tx: The number of pending transfers. Used between interrupt handler
 431 * and tasklet.
 432 * @busy: Set to true when transfer is ongoing on this channel.
 433 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 434 * pointer is NULL, then the channel is not allocated.
 435 * @chan: DMA engine handle.
 436 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 437 * transfer and call client callback.
 438 * @client: Client owned descriptor list.
 439 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 440 * @active: Active descriptor.
 441 * @done: Completed jobs
 442 * @queue: Queued jobs.
 443 * @prepare_queue: Prepared jobs.
 444 * @dma_cfg: The client configuration of this dma channel.
 445 * @configured: whether the dma_cfg configuration is valid
 446 * @base: Pointer to the device instance struct.
 447 * @src_def_cfg: Default cfg register setting for src.
 448 * @dst_def_cfg: Default cfg register setting for dst.
 449 * @log_def: Default logical channel settings.
 450 * @lcpa: Pointer to dst and src lcpa settings.
 451 * @runtime_addr: runtime configured address.
 452 * @runtime_direction: runtime configured direction.
 453 *
 454 * This struct can either "be" a logical or a physical channel.
 455 */
 456struct d40_chan {
 457	spinlock_t			 lock;
 458	int				 log_num;
 459	int				 pending_tx;
 460	bool				 busy;
 461	struct d40_phy_res		*phy_chan;
 462	struct dma_chan			 chan;
 463	struct tasklet_struct		 tasklet;
 464	struct list_head		 client;
 465	struct list_head		 pending_queue;
 466	struct list_head		 active;
 467	struct list_head		 done;
 468	struct list_head		 queue;
 469	struct list_head		 prepare_queue;
 470	struct stedma40_chan_cfg	 dma_cfg;
 471	bool				 configured;
 472	struct d40_base			*base;
 473	/* Default register configurations */
 474	u32				 src_def_cfg;
 475	u32				 dst_def_cfg;
 476	struct d40_def_lcsp		 log_def;
 477	struct d40_log_lli_full		*lcpa;
 478	/* Runtime reconfiguration */
 479	dma_addr_t			runtime_addr;
 480	enum dma_transfer_direction	runtime_direction;
 481};
 482
 483/**
 484 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 485 * controller
 486 *
 487 * @backup: the pointer to the registers address array for backup
 488 * @backup_size: the size of the registers address array for backup
 489 * @realtime_en: the realtime enable register
 490 * @realtime_clear: the realtime clear register
 491 * @high_prio_en: the high priority enable register
 492 * @high_prio_clear: the high priority clear register
 493 * @interrupt_en: the interrupt enable register
 494 * @interrupt_clear: the interrupt clear register
 495 * @il: the pointer to struct d40_interrupt_lookup
 496 * @il_size: the size of d40_interrupt_lookup array
 497 * @init_reg: the pointer to the struct d40_reg_val
 498 * @init_reg_size: the size of d40_reg_val array
 499 */
 500struct d40_gen_dmac {
 501	u32				*backup;
 502	u32				 backup_size;
 503	u32				 realtime_en;
 504	u32				 realtime_clear;
 505	u32				 high_prio_en;
 506	u32				 high_prio_clear;
 507	u32				 interrupt_en;
 508	u32				 interrupt_clear;
 509	struct d40_interrupt_lookup	*il;
 510	u32				 il_size;
 511	struct d40_reg_val		*init_reg;
 512	u32				 init_reg_size;
 513};
 514
 515/**
 516 * struct d40_base - The big global struct, one for each probe'd instance.
 517 *
 518 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 519 * @execmd_lock: Lock for execute command usage since several channels share
 520 * the same physical register.
 521 * @dev: The device structure.
 522 * @virtbase: The virtual base address of the DMA's registers.
 523 * @rev: silicon revision detected.
 524 * @clk: Pointer to the DMA clock structure.
 525 * @phy_start: Physical memory start of the DMA registers.
 526 * @phy_size: Size of the DMA register map.
 527 * @irq: The IRQ number.
 528 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 529 * transfers).
 530 * @num_phy_chans: The number of physical channels. Read from HW. This
 531 * is the number of available channels for this driver, not counting "Secure
 532 * mode" allocated physical channels.
 533 * @num_log_chans: The number of logical channels. Calculated from
 534 * num_phy_chans.
 * @dma_parms: DMA parameters for the device, holding the max segment size.
 535 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 536 * @dma_slave: dma_device channels that can only do slave transfers.
 537 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 538 * @phy_chans: Room for all possible physical channels in system.
 539 * @log_chans: Room for all possible logical channels in system.
 540 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 541 * to log_chans entries.
 542 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 543 * to phy_chans entries.
 544 * @plat_data: Pointer to provided platform_data which is the driver
 545 * configuration.
 546 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 547 * @phy_res: Vector containing all physical channels.
 548 * @lcla_pool: lcla pool settings and data.
 549 * @lcpa_base: The virtual mapped address of LCPA.
 550 * @phy_lcpa: The physical address of the LCPA.
 551 * @lcpa_size: The size of the LCPA area.
 552 * @desc_slab: cache for descriptors.
 553 * @reg_val_backup: Here the values of some hardware registers are stored
 554 * before the DMA is powered off. They are restored when the power is back on.
 555 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 556 * later
 557 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 558 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 559 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 560 * DMA controller
 561 */
 562struct d40_base {
 563	spinlock_t			 interrupt_lock;
 564	spinlock_t			 execmd_lock;
 565	struct device			 *dev;
 566	void __iomem			 *virtbase;
 567	u8				  rev:4;
 568	struct clk			 *clk;
 569	phys_addr_t			  phy_start;
 570	resource_size_t			  phy_size;
 571	int				  irq;
 572	int				  num_memcpy_chans;
 573	int				  num_phy_chans;
 574	int				  num_log_chans;
 575	struct device_dma_parameters	  dma_parms;
 576	struct dma_device		  dma_both;
 577	struct dma_device		  dma_slave;
 578	struct dma_device		  dma_memcpy;
 579	struct d40_chan			 *phy_chans;
 580	struct d40_chan			 *log_chans;
 581	struct d40_chan			**lookup_log_chans;
 582	struct d40_chan			**lookup_phy_chans;
 583	struct stedma40_platform_data	 *plat_data;
 584	struct regulator		 *lcpa_regulator;
 585	/* Physical half channels */
 586	struct d40_phy_res		 *phy_res;
 587	struct d40_lcla_pool		  lcla_pool;
 588	void				 *lcpa_base;
 589	dma_addr_t			  phy_lcpa;
 590	resource_size_t			  lcpa_size;
 591	struct kmem_cache		 *desc_slab;
 592	u32				  reg_val_backup[BACKUP_REGS_SZ];
 593	u32				  reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
 594	u32				 *reg_val_backup_chan;
 595	u16				  gcc_pwr_off_mask;
 596	struct d40_gen_dmac		  gen_dmac;
 597};
 598
 599static struct device *chan2dev(struct d40_chan *d40c)
 600{
 601	return &d40c->chan.dev->device;
 602}
 603
 604static bool chan_is_physical(struct d40_chan *chan)
 605{
 606	return chan->log_num == D40_PHY_CHAN;
 607}
 608
 609static bool chan_is_logical(struct d40_chan *chan)
 610{
 611	return !chan_is_physical(chan);
 612}
 613
 614static void __iomem *chan_base(struct d40_chan *chan)
 615{
 616	return chan->base->virtbase + D40_DREG_PCBASE +
 617	       chan->phy_chan->num * D40_DREG_PCDELTA;
 618}
 619
 620#define d40_err(dev, format, arg...)		\
 621	dev_err(dev, "[%s] " format, __func__, ## arg)
 622
 623#define chan_err(d40c, format, arg...)		\
 624	d40_err(chan2dev(d40c), format, ## arg)
 625
 626static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
 627			      int lli_len)
 628{
 629	bool is_log = chan_is_logical(d40c);
 630	u32 align;
 631	void *base;
 632
 633	if (is_log)
 634		align = sizeof(struct d40_log_lli);
 635	else
 636		align = sizeof(struct d40_phy_lli);
 637
 638	if (lli_len == 1) {
 639		base = d40d->lli_pool.pre_alloc_lli;
 640		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
 641		d40d->lli_pool.base = NULL;
 642	} else {
 643		d40d->lli_pool.size = lli_len * 2 * align;
 644
 645		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
 646		d40d->lli_pool.base = base;
 647
 648		if (d40d->lli_pool.base == NULL)
 649			return -ENOMEM;
 650	}
 651
 652	if (is_log) {
 653		d40d->lli_log.src = PTR_ALIGN(base, align);
 654		d40d->lli_log.dst = d40d->lli_log.src + lli_len;
 655
 656		d40d->lli_pool.dma_addr = 0;
 657	} else {
 658		d40d->lli_phy.src = PTR_ALIGN(base, align);
 659		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
 660
 661		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
 662							 d40d->lli_phy.src,
 663							 d40d->lli_pool.size,
 664							 DMA_TO_DEVICE);
 665
 666		if (dma_mapping_error(d40c->base->dev,
 667				      d40d->lli_pool.dma_addr)) {
 668			kfree(d40d->lli_pool.base);
 669			d40d->lli_pool.base = NULL;
 670			d40d->lli_pool.dma_addr = 0;
 671			return -ENOMEM;
 672		}
 673	}
 674
 675	return 0;
 676}
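/*
 * Sizing example for the allocator above: a physical transfer with
 * lli_len == 4 needs room for 4 src + 4 dst LLIs, so lli_pool.size becomes
 * 4 * 2 * sizeof(struct d40_phy_lli), and one extra alignment's worth is
 * kmalloc'ed so that PTR_ALIGN() can round base up safely. The common
 * single-LLI case skips kmalloc entirely and reuses pre_alloc_lli.
 */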
 677
 678static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
 679{
 680	if (d40d->lli_pool.dma_addr)
 681		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
 682				 d40d->lli_pool.size, DMA_TO_DEVICE);
 683
 684	kfree(d40d->lli_pool.base);
 685	d40d->lli_pool.base = NULL;
 686	d40d->lli_pool.size = 0;
 687	d40d->lli_log.src = NULL;
 688	d40d->lli_log.dst = NULL;
 689	d40d->lli_phy.src = NULL;
 690	d40d->lli_phy.dst = NULL;
 691}
 692
 693static int d40_lcla_alloc_one(struct d40_chan *d40c,
 694			      struct d40_desc *d40d)
 695{
 696	unsigned long flags;
 697	int i;
 698	int ret = -EINVAL;
 699
 700	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
 701
 702	/*
 703	 * Allocate both src and dst at the same time, therefore the halves
 704	 * start at 1, since 0 can't be used: zero marks the end of a list.
 705	 */
 706	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
 707		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
 708
 709		if (!d40c->base->lcla_pool.alloc_map[idx]) {
 710			d40c->base->lcla_pool.alloc_map[idx] = d40d;
 711			d40d->lcla_alloc++;
 712			ret = i;
 713			break;
 714		}
 715	}
 716
 717	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
 718
 719	return ret;
 720}
 721
 722static int d40_lcla_free_all(struct d40_chan *d40c,
 723			     struct d40_desc *d40d)
 724{
 725	unsigned long flags;
 726	int i;
 727	int ret = -EINVAL;
 728
 729	if (chan_is_physical(d40c))
 730		return 0;
 731
 732	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
 733
 734	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
 735		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
 736
 737		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
 738			d40c->base->lcla_pool.alloc_map[idx] = NULL;
 739			d40d->lcla_alloc--;
 740			if (d40d->lcla_alloc == 0) {
 741				ret = 0;
 742				break;
 743			}
 744		}
 745	}
 746
 747	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
 748
 749	return ret;
 750
 751}
 752
 753static void d40_desc_remove(struct d40_desc *d40d)
 754{
 755	list_del(&d40d->node);
 756}
 757
 758static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 759{
 760	struct d40_desc *desc = NULL;
 761
 762	if (!list_empty(&d40c->client)) {
 763		struct d40_desc *d;
 764		struct d40_desc *_d;
 765
 766		list_for_each_entry_safe(d, _d, &d40c->client, node) {
 767			if (async_tx_test_ack(&d->txd)) {
 768				d40_desc_remove(d);
 769				desc = d;
 770				memset(desc, 0, sizeof(*desc));
 771				break;
 772			}
 773		}
 774	}
 775
 776	if (!desc)
 777		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
 778
 779	if (desc)
 780		INIT_LIST_HEAD(&desc->node);
 781
 782	return desc;
 783}
 784
 785static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
 786{
 787
 788	d40_pool_lli_free(d40c, d40d);
 789	d40_lcla_free_all(d40c, d40d);
 790	kmem_cache_free(d40c->base->desc_slab, d40d);
 791}
 792
 793static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
 794{
 795	list_add_tail(&desc->node, &d40c->active);
 796}
 797
 798static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
 799{
 800	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
 801	struct d40_phy_lli *lli_src = desc->lli_phy.src;
 802	void __iomem *base = chan_base(chan);
 803
 804	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
 805	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
 806	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
 807	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
 808
 809	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
 810	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
 811	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
 812	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
 813}
 814
 815static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
 816{
 817	list_add_tail(&desc->node, &d40c->done);
 818}
 819
 820static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 821{
 822	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
 823	struct d40_log_lli_bidir *lli = &desc->lli_log;
 824	int lli_current = desc->lli_current;
 825	int lli_len = desc->lli_len;
 826	bool cyclic = desc->cyclic;
 827	int curr_lcla = -EINVAL;
 828	int first_lcla = 0;
 829	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
 830	bool linkback;
 831
 832	/*
 833	 * We may have partially running cyclic transfers, in case we didn't get
 834	 * enough LCLA entries.
 835	 */
 836	linkback = cyclic && lli_current == 0;
 837
 838	/*
 839	 * For linkback, we need one LCLA even with only one link, because we
 840	 * can't link back to the one in LCPA space
 841	 */
 842	if (linkback || (lli_len - lli_current > 1)) {
 843		/*
 844		 * If the channel is expected to use only soft_lli, don't
 845		 * allocate an lcla. This is to avoid a HW issue that exists
 846		 * in some controllers during a peripheral-to-memory transfer
 847		 * that uses linked lists.
 848		 */
 849		if (!(chan->phy_chan->use_soft_lli &&
 850			chan->dma_cfg.dir == DMA_DEV_TO_MEM))
 851			curr_lcla = d40_lcla_alloc_one(chan, desc);
 852
 853		first_lcla = curr_lcla;
 854	}
 855
 856	/*
 857	 * For linkback, we normally load the LCPA in the loop since we need to
 858	 * link it to the second LCLA and not the first.  However, if we
 859	 * couldn't even get a first LCLA, then we have to run in LCPA and
 860	 * reload manually.
 861	 */
 862	if (!linkback || curr_lcla == -EINVAL) {
 863		unsigned int flags = 0;
 864
 865		if (curr_lcla == -EINVAL)
 866			flags |= LLI_TERM_INT;
 867
 868		d40_log_lli_lcpa_write(chan->lcpa,
 869				       &lli->dst[lli_current],
 870				       &lli->src[lli_current],
 871				       curr_lcla,
 872				       flags);
 873		lli_current++;
 874	}
 875
 876	if (curr_lcla < 0)
 877		goto set_current;
 878
 879	for (; lli_current < lli_len; lli_current++) {
 880		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
 881					   8 * curr_lcla * 2;
 882		struct d40_log_lli *lcla = pool->base + lcla_offset;
 883		unsigned int flags = 0;
 884		int next_lcla;
 885
 886		if (lli_current + 1 < lli_len)
 887			next_lcla = d40_lcla_alloc_one(chan, desc);
 888		else
 889			next_lcla = linkback ? first_lcla : -EINVAL;
 890
 891		if (cyclic || next_lcla == -EINVAL)
 892			flags |= LLI_TERM_INT;
 893
 894		if (linkback && curr_lcla == first_lcla) {
 895			/* First link goes in both LCPA and LCLA */
 896			d40_log_lli_lcpa_write(chan->lcpa,
 897					       &lli->dst[lli_current],
 898					       &lli->src[lli_current],
 899					       next_lcla, flags);
 900		}
 901
 902		/*
 903		 * One unused LCLA in the cyclic case if the very first
 904		 * next_lcla fails...
 905		 */
 906		d40_log_lli_lcla_write(lcla,
 907				       &lli->dst[lli_current],
 908				       &lli->src[lli_current],
 909				       next_lcla, flags);
 910
 911		/*
 912		 * Cache maintenance is not needed if lcla is
 913		 * mapped in esram
 914		 */
 915		if (!use_esram_lcla) {
 916			dma_sync_single_range_for_device(chan->base->dev,
 917						pool->dma_addr, lcla_offset,
 918						2 * sizeof(struct d40_log_lli),
 919						DMA_TO_DEVICE);
 920		}
 921		curr_lcla = next_lcla;
 922
 923		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
 924			lli_current++;
 925			break;
 926		}
 927	}
 928 set_current:
 929	desc->lli_current = lli_current;
 930}
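/*
 * Offset arithmetic used above, for reference: every physical channel owns
 * a 1 KiB slice of the LCLA area, and each allocated lcla index covers a
 * src/dst pair of struct d40_log_lli (the 8 * curr_lcla * 2 term assumes
 * the 8-byte lli layout from ste_dma40_ll.h). Physical channel 2 with
 * curr_lcla == 5, for instance, writes at byte offset
 * 2 * 1024 + 8 * 5 * 2 = 2128 from pool->base.
 */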
 931
 932static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
 933{
 934	if (chan_is_physical(d40c)) {
 935		d40_phy_lli_load(d40c, d40d);
 936		d40d->lli_current = d40d->lli_len;
 937	} else
 938		d40_log_lli_to_lcxa(d40c, d40d);
 939}
 940
 941static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
 942{
 943	return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
 944}
 945
 946/* remove desc from current queue and add it to the pending_queue */
 947static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 948{
 949	d40_desc_remove(desc);
 950	desc->is_in_client_list = false;
 951	list_add_tail(&desc->node, &d40c->pending_queue);
 952}
 953
 954static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
 955{
 956	return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
 957					node);
 958}
 959
 960static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
 961{
 962	return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
 963}
 964
 965static struct d40_desc *d40_first_done(struct d40_chan *d40c)
 966{
 967	return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
 968}
 969
 970static int d40_psize_2_burst_size(bool is_log, int psize)
 971{
 972	if (is_log) {
 973		if (psize == STEDMA40_PSIZE_LOG_1)
 974			return 1;
 975	} else {
 976		if (psize == STEDMA40_PSIZE_PHY_1)
 977			return 1;
 978	}
 979
 980	return 2 << psize;
 981}
 982
 983/*
 984 * The dma hardware only supports transmitting packets of up to
 985 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
 986 *
 987 * Calculate the total number of dma elements required to send the entire sg list.
 988 */
 989static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
 990{
 991	int dmalen;
 992	u32 max_w = max(data_width1, data_width2);
 993	u32 min_w = min(data_width1, data_width2);
 994	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
 995
 996	if (seg_max > STEDMA40_MAX_SEG_SIZE)
 997		seg_max -= max_w;
 998
 999	if (!IS_ALIGNED(size, max_w))
1000		return -EINVAL;
1001
1002	if (size <= seg_max)
1003		dmalen = 1;
1004	else {
1005		dmalen = size / seg_max;
1006		if (dmalen * seg_max < size)
1007			dmalen++;
1008	}
1009	return dmalen;
1010}
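/*
 * Worked example, assuming STEDMA40_MAX_SEG_SIZE is 0xffff as defined in
 * <linux/platform_data/dma-ste-dma40.h>: for data widths of 2 and 4 bytes,
 * min_w = 2 and max_w = 4, so seg_max = ALIGN(0xffff * 2, 4) = 0x20000,
 * which exceeds 0xffff and is trimmed to 0x1fffc. A 0x40000 byte transfer
 * (already aligned to max_w) then yields dmalen = 3: two full segments
 * plus an 8 byte remainder.
 */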
1011
1012static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
1013			   u32 data_width1, u32 data_width2)
1014{
1015	struct scatterlist *sg;
1016	int i;
1017	int len = 0;
1018	int ret;
1019
1020	for_each_sg(sgl, sg, sg_len, i) {
1021		ret = d40_size_2_dmalen(sg_dma_len(sg),
1022					data_width1, data_width2);
1023		if (ret < 0)
1024			return ret;
1025		len += ret;
1026	}
1027	return len;
1028}
1029
1030static int __d40_execute_command_phy(struct d40_chan *d40c,
1031				     enum d40_command command)
1032{
1033	u32 status;
1034	int i;
1035	void __iomem *active_reg;
1036	int ret = 0;
1037	unsigned long flags;
1038	u32 wmask;
1039
1040	if (command == D40_DMA_STOP) {
1041		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1042		if (ret)
1043			return ret;
1044	}
1045
1046	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1047
1048	if (d40c->phy_chan->num % 2 == 0)
1049		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1050	else
1051		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1052
1053	if (command == D40_DMA_SUSPEND_REQ) {
1054		status = (readl(active_reg) &
1055			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1056			D40_CHAN_POS(d40c->phy_chan->num);
1057
1058		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1059			goto unlock;
1060	}
1061
1062	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1063	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1064	       active_reg);
1065
1066	if (command == D40_DMA_SUSPEND_REQ) {
1067
1068		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1069			status = (readl(active_reg) &
1070				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1071				D40_CHAN_POS(d40c->phy_chan->num);
1072
1073			cpu_relax();
1074			/*
1075			 * Reduce the number of bus accesses while
1076			 * waiting for the DMA to suspend.
1077			 */
1078			udelay(3);
1079
1080			if (status == D40_DMA_STOP ||
1081			    status == D40_DMA_SUSPENDED)
1082				break;
1083		}
1084
1085		if (i == D40_SUSPEND_MAX_IT) {
1086			chan_err(d40c,
1087				"unable to suspend the chl %d (log: %d) status %x\n",
1088				d40c->phy_chan->num, d40c->log_num,
1089				status);
1090			dump_stack();
1091			ret = -EBUSY;
1092		}
1093
1094	}
1095 unlock:
1096	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1097	return ret;
1098}
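/*
 * Register layout behind the helper above: each physical channel occupies
 * a 2-bit command/status field in the ACTIVE (even channels) or ACTIVO
 * (odd channels) register. For channel 5, D40_CHAN_POS(5) = 2 * (5 / 2) = 4,
 * so its field sits in bits 5:4 of ACTIVO.
 */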
1099
1100static void d40_term_all(struct d40_chan *d40c)
1101{
1102	struct d40_desc *d40d;
1103	struct d40_desc *_d;
1104
1105	/* Release completed descriptors */
1106	while ((d40d = d40_first_done(d40c))) {
1107		d40_desc_remove(d40d);
1108		d40_desc_free(d40c, d40d);
1109	}
1110
1111	/* Release active descriptors */
1112	while ((d40d = d40_first_active_get(d40c))) {
1113		d40_desc_remove(d40d);
1114		d40_desc_free(d40c, d40d);
1115	}
1116
1117	/* Release queued descriptors waiting for transfer */
1118	while ((d40d = d40_first_queued(d40c))) {
1119		d40_desc_remove(d40d);
1120		d40_desc_free(d40c, d40d);
1121	}
1122
1123	/* Release pending descriptors */
1124	while ((d40d = d40_first_pending(d40c))) {
1125		d40_desc_remove(d40d);
1126		d40_desc_free(d40c, d40d);
1127	}
1128
1129	/* Release client owned descriptors */
1130	if (!list_empty(&d40c->client))
1131		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1132			d40_desc_remove(d40d);
1133			d40_desc_free(d40c, d40d);
1134		}
1135
1136	/* Release descriptors in prepare queue */
1137	if (!list_empty(&d40c->prepare_queue))
1138		list_for_each_entry_safe(d40d, _d,
1139					 &d40c->prepare_queue, node) {
1140			d40_desc_remove(d40d);
1141			d40_desc_free(d40c, d40d);
1142		}
1143
1144	d40c->pending_tx = 0;
1145}
1146
1147static void __d40_config_set_event(struct d40_chan *d40c,
1148				   enum d40_events event_type, u32 event,
1149				   int reg)
1150{
1151	void __iomem *addr = chan_base(d40c) + reg;
1152	int tries;
1153	u32 status;
1154
1155	switch (event_type) {
1156
1157	case D40_DEACTIVATE_EVENTLINE:
1158
1159		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1160		       | ~D40_EVENTLINE_MASK(event), addr);
1161		break;
1162
1163	case D40_SUSPEND_REQ_EVENTLINE:
1164		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1165			  D40_EVENTLINE_POS(event);
1166
1167		if (status == D40_DEACTIVATE_EVENTLINE ||
1168		    status == D40_SUSPEND_REQ_EVENTLINE)
1169			break;
1170
1171		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1172		       | ~D40_EVENTLINE_MASK(event), addr);
1173
1174		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1175
1176			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1177				  D40_EVENTLINE_POS(event);
1178
1179			cpu_relax();
1180			/*
1181			 * Reduce the number of bus accesses while
1182			 * waiting for the DMA to suspend.
1183			 */
1184			udelay(3);
1185
1186			if (status == D40_DEACTIVATE_EVENTLINE)
1187				break;
1188		}
1189
1190		if (tries == D40_SUSPEND_MAX_IT) {
1191			chan_err(d40c,
1192				"unable to stop the event_line chl %d (log: %d) "
1193				"status %x\n", d40c->phy_chan->num,
1194				 d40c->log_num, status);
1195		}
1196		break;
1197
1198	case D40_ACTIVATE_EVENTLINE:
1199	/*
1200	 * The hardware sometimes doesn't register the enable when src and dst
1201	 * event lines are active on the same logical channel.  Retry to ensure
1202	 * it does.  Usually only one retry is sufficient.
1203	 */
1204		tries = 100;
1205		while (--tries) {
1206			writel((D40_ACTIVATE_EVENTLINE <<
1207				D40_EVENTLINE_POS(event)) |
1208				~D40_EVENTLINE_MASK(event), addr);
1209
1210			if (readl(addr) & D40_EVENTLINE_MASK(event))
1211				break;
1212		}
1213
1214		if (tries != 99)
1215			dev_dbg(chan2dev(d40c),
1216				"[%s] workaround enable S%cLNK (%d tries)\n",
1217				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1218				100 - tries);
1219
1220		WARN_ON(!tries);
1221		break;
1222
1223	case D40_ROUND_EVENTLINE:
1224		BUG();
1225		break;
1226
1227	}
1228}
1229
1230static void d40_config_set_event(struct d40_chan *d40c,
1231				 enum d40_events event_type)
1232{
1233	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1234
1235	/* Enable event line connected to device (or memcpy) */
1236	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1237	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1238		__d40_config_set_event(d40c, event_type, event,
1239				       D40_CHAN_REG_SSLNK);
1240
1241	if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1242		__d40_config_set_event(d40c, event_type, event,
1243				       D40_CHAN_REG_SDLNK);
1244}
1245
1246static u32 d40_chan_has_events(struct d40_chan *d40c)
1247{
1248	void __iomem *chanbase = chan_base(d40c);
1249	u32 val;
1250
1251	val = readl(chanbase + D40_CHAN_REG_SSLNK);
1252	val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1253
1254	return val;
1255}
1256
1257static int
1258__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1259{
1260	unsigned long flags;
1261	int ret = 0;
1262	u32 active_status;
1263	void __iomem *active_reg;
1264
1265	if (d40c->phy_chan->num % 2 == 0)
1266		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1267	else
1268		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1269
1270
1271	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1272
1273	switch (command) {
1274	case D40_DMA_STOP:
1275	case D40_DMA_SUSPEND_REQ:
1276
1277		active_status = (readl(active_reg) &
1278				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1279				 D40_CHAN_POS(d40c->phy_chan->num);
1280
1281		if (active_status == D40_DMA_RUN)
1282			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1283		else
1284			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1285
1286		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1287			ret = __d40_execute_command_phy(d40c, command);
1288
1289		break;
1290
1291	case D40_DMA_RUN:
1292
1293		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1294		ret = __d40_execute_command_phy(d40c, command);
1295		break;
1296
1297	case D40_DMA_SUSPENDED:
1298		BUG();
1299		break;
1300	}
1301
1302	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1303	return ret;
1304}
1305
1306static int d40_channel_execute_command(struct d40_chan *d40c,
1307				       enum d40_command command)
1308{
1309	if (chan_is_logical(d40c))
1310		return __d40_execute_command_log(d40c, command);
1311	else
1312		return __d40_execute_command_phy(d40c, command);
1313}
1314
1315static u32 d40_get_prmo(struct d40_chan *d40c)
1316{
1317	static const unsigned int phy_map[] = {
1318		[STEDMA40_PCHAN_BASIC_MODE]
1319			= D40_DREG_PRMO_PCHAN_BASIC,
1320		[STEDMA40_PCHAN_MODULO_MODE]
1321			= D40_DREG_PRMO_PCHAN_MODULO,
1322		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
1323			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1324	};
1325	static const unsigned int log_map[] = {
1326		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1327			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1328		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1329			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1330		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1331			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1332	};
1333
1334	if (chan_is_physical(d40c))
1335		return phy_map[d40c->dma_cfg.mode_opt];
1336	else
1337		return log_map[d40c->dma_cfg.mode_opt];
1338}
1339
1340static void d40_config_write(struct d40_chan *d40c)
1341{
1342	u32 addr_base;
1343	u32 var;
1344
1345	/* Odd addresses are even addresses + 4 */
1346	addr_base = (d40c->phy_chan->num % 2) * 4;
1347	/* Setup channel mode to logical or physical */
1348	var = ((u32)(chan_is_logical(d40c)) + 1) <<
1349		D40_CHAN_POS(d40c->phy_chan->num);
1350	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1351
1352	/* Setup operational mode option register */
1353	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1354
1355	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1356
1357	if (chan_is_logical(d40c)) {
1358		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1359			   & D40_SREG_ELEM_LOG_LIDX_MASK;
1360		void __iomem *chanbase = chan_base(d40c);
1361
1362		/* Set default config for CFG reg */
1363		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1364		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1365
1366		/* Set LIDX for lcla */
1367		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1368		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1369
1370		/* Clear LNK which will be used by d40_chan_has_events() */
1371		writel(0, chanbase + D40_CHAN_REG_SSLNK);
1372		writel(0, chanbase + D40_CHAN_REG_SDLNK);
1373	}
1374}
1375
1376static u32 d40_residue(struct d40_chan *d40c)
1377{
1378	u32 num_elt;
1379
1380	if (chan_is_logical(d40c))
1381		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1382			>> D40_MEM_LCSP2_ECNT_POS;
1383	else {
1384		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1385		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1386			  >> D40_SREG_ELEM_PHY_ECNT_POS;
1387	}
1388
1389	return num_elt * d40c->dma_cfg.dst_info.data_width;
1390}
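/*
 * The residue is reported in bytes: the element counter read above holds
 * the number of data elements still to be transferred, so an ECNT of 10 on
 * a channel configured for 4-byte dst accesses means 40 bytes remain.
 */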
1391
1392static bool d40_tx_is_linked(struct d40_chan *d40c)
1393{
1394	bool is_link;
1395
1396	if (chan_is_logical(d40c))
1397		is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
1398	else
1399		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1400			  & D40_SREG_LNK_PHYS_LNK_MASK;
1401
1402	return is_link;
1403}
1404
1405static int d40_pause(struct dma_chan *chan)
1406{
1407	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1408	int res = 0;
1409	unsigned long flags;
1410
1411	if (d40c->phy_chan == NULL) {
1412		chan_err(d40c, "Channel is not allocated!\n");
1413		return -EINVAL;
1414	}
1415
1416	if (!d40c->busy)
1417		return 0;
1418
1419	spin_lock_irqsave(&d40c->lock, flags);
1420	pm_runtime_get_sync(d40c->base->dev);
1421
1422	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1423
1424	pm_runtime_mark_last_busy(d40c->base->dev);
1425	pm_runtime_put_autosuspend(d40c->base->dev);
1426	spin_unlock_irqrestore(&d40c->lock, flags);
1427	return res;
1428}
1429
1430static int d40_resume(struct dma_chan *chan)
1431{
1432	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1433	int res = 0;
1434	unsigned long flags;
1435
1436	if (d40c->phy_chan == NULL) {
1437		chan_err(d40c, "Channel is not allocated!\n");
1438		return -EINVAL;
1439	}
1440
1441	if (!d40c->busy)
1442		return 0;
1443
1444	spin_lock_irqsave(&d40c->lock, flags);
1445	pm_runtime_get_sync(d40c->base->dev);
1446
1447	/* If bytes are left to transfer or the tx is linked, resume the job */
1448	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1449		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1450
1451	pm_runtime_mark_last_busy(d40c->base->dev);
1452	pm_runtime_put_autosuspend(d40c->base->dev);
1453	spin_unlock_irqrestore(&d40c->lock, flags);
1454	return res;
1455}
1456
1457static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1458{
1459	struct d40_chan *d40c = container_of(tx->chan,
1460					     struct d40_chan,
1461					     chan);
1462	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1463	unsigned long flags;
1464	dma_cookie_t cookie;
1465
1466	spin_lock_irqsave(&d40c->lock, flags);
1467	cookie = dma_cookie_assign(tx);
1468	d40_desc_queue(d40c, d40d);
1469	spin_unlock_irqrestore(&d40c->lock, flags);
1470
1471	return cookie;
1472}
1473
1474static int d40_start(struct d40_chan *d40c)
1475{
1476	return d40_channel_execute_command(d40c, D40_DMA_RUN);
1477}
1478
1479static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1480{
1481	struct d40_desc *d40d;
1482	int err;
1483
1484	/* Start queued jobs, if any */
1485	d40d = d40_first_queued(d40c);
1486
1487	if (d40d != NULL) {
1488		if (!d40c->busy) {
1489			d40c->busy = true;
1490			pm_runtime_get_sync(d40c->base->dev);
1491		}
1492
1493		/* Remove from queue */
1494		d40_desc_remove(d40d);
1495
1496		/* Add to active queue */
1497		d40_desc_submit(d40c, d40d);
1498
1499		/* Initiate DMA job */
1500		d40_desc_load(d40c, d40d);
1501
1502		/* Start dma job */
1503		err = d40_start(d40c);
1504
1505		if (err)
1506			return NULL;
1507	}
1508
1509	return d40d;
1510}
1511
1512/* called from interrupt context */
1513static void dma_tc_handle(struct d40_chan *d40c)
1514{
1515	struct d40_desc *d40d;
1516
1517	/* Get first active entry from list */
1518	d40d = d40_first_active_get(d40c);
1519
1520	if (d40d == NULL)
1521		return;
1522
1523	if (d40d->cyclic) {
1524		/*
1525		 * If this was a partially loaded list, we need to reload
1526		 * it, but only when the list is completed.  We need to check
1527		 * for done because the interrupt will hit for every link, and
1528		 * not just the last one.
1529		 */
1530		if (d40d->lli_current < d40d->lli_len
1531		    && !d40_tx_is_linked(d40c)
1532		    && !d40_residue(d40c)) {
1533			d40_lcla_free_all(d40c, d40d);
1534			d40_desc_load(d40c, d40d);
1535			(void) d40_start(d40c);
1536
1537			if (d40d->lli_current == d40d->lli_len)
1538				d40d->lli_current = 0;
1539		}
1540	} else {
1541		d40_lcla_free_all(d40c, d40d);
1542
1543		if (d40d->lli_current < d40d->lli_len) {
1544			d40_desc_load(d40c, d40d);
1545			/* Start dma job */
1546			(void) d40_start(d40c);
1547			return;
1548		}
1549
1550		if (d40_queue_start(d40c) == NULL) {
1551			d40c->busy = false;
1552
1553			pm_runtime_mark_last_busy(d40c->base->dev);
1554			pm_runtime_put_autosuspend(d40c->base->dev);
1555		}
1556
1557		d40_desc_remove(d40d);
1558		d40_desc_done(d40c, d40d);
1559	}
1560
1561	d40c->pending_tx++;
1562	tasklet_schedule(&d40c->tasklet);
1563
1564}
1565
1566static void dma_tasklet(unsigned long data)
1567{
1568	struct d40_chan *d40c = (struct d40_chan *) data;
1569	struct d40_desc *d40d;
1570	unsigned long flags;
1571	bool callback_active;
1572	struct dmaengine_desc_callback cb;
1573
1574	spin_lock_irqsave(&d40c->lock, flags);
1575
1576	/* Get first entry from the done list */
1577	d40d = d40_first_done(d40c);
1578	if (d40d == NULL) {
1579		/* Check if we have reached here for cyclic job */
1580		d40d = d40_first_active_get(d40c);
1581		if (d40d == NULL || !d40d->cyclic)
1582			goto check_pending_tx;
1583	}
1584
1585	if (!d40d->cyclic)
1586		dma_cookie_complete(&d40d->txd);
1587
1588	/*
1589	 * When terminating a channel, pending_tx is set to zero.
1590	 * This prevents any finished active jobs from returning to the client.
1591	 */
1592	if (d40c->pending_tx == 0) {
1593		spin_unlock_irqrestore(&d40c->lock, flags);
1594		return;
1595	}
1596
1597	/* Callback to client */
1598	callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1599	dmaengine_desc_get_callback(&d40d->txd, &cb);
1600
1601	if (!d40d->cyclic) {
1602		if (async_tx_test_ack(&d40d->txd)) {
1603			d40_desc_remove(d40d);
1604			d40_desc_free(d40c, d40d);
1605		} else if (!d40d->is_in_client_list) {
1606			d40_desc_remove(d40d);
1607			d40_lcla_free_all(d40c, d40d);
1608			list_add_tail(&d40d->node, &d40c->client);
1609			d40d->is_in_client_list = true;
1610		}
1611	}
1612
1613	d40c->pending_tx--;
1614
1615	if (d40c->pending_tx)
1616		tasklet_schedule(&d40c->tasklet);
1617
1618	spin_unlock_irqrestore(&d40c->lock, flags);
1619
1620	if (callback_active)
1621		dmaengine_desc_callback_invoke(&cb, NULL);
1622
1623	return;
1624 check_pending_tx:
1625	/* Rescue manoeuvre if receiving double interrupts */
1626	if (d40c->pending_tx > 0)
1627		d40c->pending_tx--;
1628	spin_unlock_irqrestore(&d40c->lock, flags);
1629}
1630
1631static irqreturn_t d40_handle_interrupt(int irq, void *data)
1632{
1633	int i;
1634	u32 idx;
1635	u32 row;
1636	long chan = -1;
1637	struct d40_chan *d40c;
1638	unsigned long flags;
1639	struct d40_base *base = data;
1640	u32 regs[base->gen_dmac.il_size];
1641	struct d40_interrupt_lookup *il = base->gen_dmac.il;
1642	u32 il_size = base->gen_dmac.il_size;
1643
1644	spin_lock_irqsave(&base->interrupt_lock, flags);
1645
1646	/* Read interrupt status of both logical and physical channels */
1647	for (i = 0; i < il_size; i++)
1648		regs[i] = readl(base->virtbase + il[i].src);
1649
1650	for (;;) {
1651
1652		chan = find_next_bit((unsigned long *)regs,
1653				     BITS_PER_LONG * il_size, chan + 1);
1654
1655		/* No more set bits found? */
1656		if (chan == BITS_PER_LONG * il_size)
1657			break;
1658
1659		row = chan / BITS_PER_LONG;
1660		idx = chan & (BITS_PER_LONG - 1);
1661
1662		if (il[row].offset == D40_PHY_CHAN)
1663			d40c = base->lookup_phy_chans[idx];
1664		else
1665			d40c = base->lookup_log_chans[il[row].offset + idx];
1666
1667		if (!d40c) {
1668			/*
1669			 * No error because this can happen if something else
1670			 * in the system is using the channel.
1671			 */
1672			continue;
1673		}
1674
1675		/* ACK interrupt */
1676		writel(BIT(idx), base->virtbase + il[row].clr);
1677
1678		spin_lock(&d40c->lock);
1679
1680		if (!il[row].is_error)
1681			dma_tc_handle(d40c);
1682		else
1683			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1684				chan, il[row].offset, idx);
1685
1686		spin_unlock(&d40c->lock);
1687	}
1688
1689	spin_unlock_irqrestore(&base->interrupt_lock, flags);
1690
1691	return IRQ_HANDLED;
1692}
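/*
 * The scan above relies on each interrupt-lookup row covering one 32-bit
 * status register: on the 32-bit ARM platforms this driver targets,
 * BITS_PER_LONG == 32, so row = chan / BITS_PER_LONG picks the same
 * register the bit was read from and idx is the bit within it.
 */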
1693
1694static int d40_validate_conf(struct d40_chan *d40c,
1695			     struct stedma40_chan_cfg *conf)
1696{
1697	int res = 0;
1698	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1699
1700	if (!conf->dir) {
1701		chan_err(d40c, "Invalid direction.\n");
1702		res = -EINVAL;
1703	}
1704
1705	if ((is_log && conf->dev_type > d40c->base->num_log_chans)  ||
1706	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1707	    (conf->dev_type < 0)) {
1708		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1709		res = -EINVAL;
1710	}
1711
1712	if (conf->dir == DMA_DEV_TO_DEV) {
1713		/*
1714		 * DMAC HW supports it. Will be added to this driver,
1715		 * in case any dma client requires it.
1716		 */
1717		chan_err(d40c, "periph to periph not supported\n");
1718		res = -EINVAL;
1719	}
1720
1721	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1722	    conf->src_info.data_width !=
1723	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1724	    conf->dst_info.data_width) {
1725		/*
1726		 * The DMAC hardware only supports
1727		 * src (burst x width) == dst (burst x width)
1728		 */
1729
1730		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1731		res = -EINVAL;
1732	}
1733
1734	return res;
1735}
1736
1737static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1738			       bool is_src, int log_event_line, bool is_log,
1739			       bool *first_user)
1740{
1741	unsigned long flags;
1742	spin_lock_irqsave(&phy->lock, flags);
1743
1744	*first_user = ((phy->allocated_src | phy->allocated_dst)
1745			== D40_ALLOC_FREE);
1746
1747	if (!is_log) {
1748		/* Physical interrupts are masked per physical full channel */
1749		if (phy->allocated_src == D40_ALLOC_FREE &&
1750		    phy->allocated_dst == D40_ALLOC_FREE) {
1751			phy->allocated_dst = D40_ALLOC_PHY;
1752			phy->allocated_src = D40_ALLOC_PHY;
1753			goto found_unlock;
1754		} else
1755			goto not_found_unlock;
1756	}
1757
1758	/* Logical channel */
1759	if (is_src) {
1760		if (phy->allocated_src == D40_ALLOC_PHY)
1761			goto not_found_unlock;
1762
1763		if (phy->allocated_src == D40_ALLOC_FREE)
1764			phy->allocated_src = D40_ALLOC_LOG_FREE;
1765
1766		if (!(phy->allocated_src & BIT(log_event_line))) {
1767			phy->allocated_src |= BIT(log_event_line);
1768			goto found_unlock;
1769		} else
1770			goto not_found_unlock;
1771	} else {
1772		if (phy->allocated_dst == D40_ALLOC_PHY)
1773			goto not_found_unlock;
1774
1775		if (phy->allocated_dst == D40_ALLOC_FREE)
1776			phy->allocated_dst = D40_ALLOC_LOG_FREE;
1777
1778		if (!(phy->allocated_dst & BIT(log_event_line))) {
1779			phy->allocated_dst |= BIT(log_event_line);
1780			goto found_unlock;
1781		}
1782	}
1783 not_found_unlock:
1784	spin_unlock_irqrestore(&phy->lock, flags);
1785	return false;
1786 found_unlock:
1787	spin_unlock_irqrestore(&phy->lock, flags);
1788	return true;
1789}
1790
1791static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1792			       int log_event_line)
1793{
1794	unsigned long flags;
1795	bool is_free = false;
1796
1797	spin_lock_irqsave(&phy->lock, flags);
1798	if (!log_event_line) {
1799		phy->allocated_dst = D40_ALLOC_FREE;
1800		phy->allocated_src = D40_ALLOC_FREE;
1801		is_free = true;
1802		goto unlock;
1803	}
1804
1805	/* Logical channel */
1806	if (is_src) {
1807		phy->allocated_src &= ~BIT(log_event_line);
1808		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1809			phy->allocated_src = D40_ALLOC_FREE;
1810	} else {
1811		phy->allocated_dst &= ~BIT(log_event_line);
1812		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1813			phy->allocated_dst = D40_ALLOC_FREE;
1814	}
1815
1816	is_free = ((phy->allocated_src | phy->allocated_dst) ==
1817		   D40_ALLOC_FREE);
1818 unlock:
1819	spin_unlock_irqrestore(&phy->lock, flags);
1820
1821	return is_free;
1822}
1823
1824static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1825{
1826	int dev_type = d40c->dma_cfg.dev_type;
1827	int event_group;
1828	int event_line;
1829	struct d40_phy_res *phys;
1830	int i;
1831	int j;
1832	int log_num;
1833	int num_phy_chans;
1834	bool is_src;
1835	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1836
1837	phys = d40c->base->phy_res;
1838	num_phy_chans = d40c->base->num_phy_chans;
1839
1840	if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1841		log_num = 2 * dev_type;
1842		is_src = true;
1843	} else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1844		   d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1845		/* dst event lines are used for logical memcpy */
1846		log_num = 2 * dev_type + 1;
1847		is_src = false;
1848	} else
1849		return -EINVAL;
1850
1851	event_group = D40_TYPE_TO_GROUP(dev_type);
1852	event_line = D40_TYPE_TO_EVENT(dev_type);
1853
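	/*
	 * Physical channels are grouped in blocks of eight, and each event
	 * group owns a fixed pair of channels within every block (channels
	 * event_group * 2 and event_group * 2 + 1), which is why the
	 * searches below step in strides of eight.
	 */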
1854	if (!is_log) {
1855		if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1856			/* Find physical half channel */
1857			if (d40c->dma_cfg.use_fixed_channel) {
1858				i = d40c->dma_cfg.phy_channel;
1859				if (d40_alloc_mask_set(&phys[i], is_src,
1860						       0, is_log,
1861						       first_phy_user))
1862					goto found_phy;
1863			} else {
1864				for (i = 0; i < num_phy_chans; i++) {
1865					if (d40_alloc_mask_set(&phys[i], is_src,
1866						       0, is_log,
1867						       first_phy_user))
1868						goto found_phy;
1869				}
1870			}
1871		} else
1872			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1873				int phy_num = j + event_group * 2;
1874				for (i = phy_num; i < phy_num + 2; i++) {
1875					if (d40_alloc_mask_set(&phys[i],
1876							       is_src,
1877							       0,
1878							       is_log,
1879							       first_phy_user))
1880						goto found_phy;
1881				}
1882			}
1883		return -EINVAL;
1884found_phy:
1885		d40c->phy_chan = &phys[i];
1886		d40c->log_num = D40_PHY_CHAN;
1887		goto out;
1888	}
1889	if (dev_type == -1)
1890		return -EINVAL;
1891
1892	/* Find logical channel */
1893	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1894		int phy_num = j + event_group * 2;
1895
1896		if (d40c->dma_cfg.use_fixed_channel) {
1897			i = d40c->dma_cfg.phy_channel;
1898
1899			if ((i != phy_num) && (i != phy_num + 1)) {
1900				dev_err(chan2dev(d40c),
1901					"invalid fixed phy channel %d\n", i);
1902				return -EINVAL;
1903			}
1904
1905			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1906					       is_log, first_phy_user))
1907				goto found_log;
1908
1909			dev_err(chan2dev(d40c),
1910				"could not allocate fixed phy channel %d\n", i);
1911			return -EINVAL;
1912		}
1913
1914		/*
1915		 * Spread logical channels across all available physical
1916		 * channels rather than packing every logical channel onto
1917		 * the first available one.
1918		 */
1919		if (is_src) {
1920			for (i = phy_num; i < phy_num + 2; i++) {
1921				if (d40_alloc_mask_set(&phys[i], is_src,
1922						       event_line, is_log,
1923						       first_phy_user))
1924					goto found_log;
1925			}
1926		} else {
1927			for (i = phy_num + 1; i >= phy_num; i--) {
1928				if (d40_alloc_mask_set(&phys[i], is_src,
1929						       event_line, is_log,
1930						       first_phy_user))
1931					goto found_log;
1932			}
1933		}
1934	}
1935	return -EINVAL;
1936
1937found_log:
1938	d40c->phy_chan = &phys[i];
1939	d40c->log_num = log_num;
1940out:
1941
1942	if (is_log)
1943		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1944	else
1945		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1946
1947	return 0;
1949}
1950
1951static int d40_config_memcpy(struct d40_chan *d40c)
1952{
1953	dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1954
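	/*
	 * The capability mask identifies which dma_device this channel
	 * belongs to: a memcpy-only device gets the logical memcpy
	 * configuration, while a device that is both memcpy and slave
	 * capable gets the physical one.
	 */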
1955	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1956		d40c->dma_cfg = dma40_memcpy_conf_log;
1957		d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1958
1959		d40_log_cfg(&d40c->dma_cfg,
1960			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1961
1962	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
1963		   dma_has_cap(DMA_SLAVE, cap)) {
1964		d40c->dma_cfg = dma40_memcpy_conf_phy;
1965
1966		/* Generate interrupt at end of transfer or relink. */
1967		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1968
1969		/* Generate interrupt on error. */
1970		d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1971		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1972
1973	} else {
1974		chan_err(d40c, "No memcpy\n");
1975		return -EINVAL;
1976	}
1977
1978	return 0;
1979}
1980
1981static int d40_free_dma(struct d40_chan *d40c)
1982{
1984	int res = 0;
1985	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1986	struct d40_phy_res *phy = d40c->phy_chan;
1987	bool is_src;
1988
1989	/* Terminate all queued and active transfers */
1990	d40_term_all(d40c);
1991
1992	if (phy == NULL) {
1993		chan_err(d40c, "phy == null\n");
1994		return -EINVAL;
1995	}
1996
1997	if (phy->allocated_src == D40_ALLOC_FREE &&
1998	    phy->allocated_dst == D40_ALLOC_FREE) {
1999		chan_err(d40c, "channel already free\n");
2000		return -EINVAL;
2001	}
2002
2003	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2004	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2005		is_src = false;
2006	else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2007		is_src = true;
2008	else {
2009		chan_err(d40c, "Unknown direction\n");
2010		return -EINVAL;
2011	}
2012
2013	pm_runtime_get_sync(d40c->base->dev);
2014	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2015	if (res) {
2016		chan_err(d40c, "stop failed\n");
2017		goto mark_last_busy;
2018	}
2019
2020	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2021
2022	if (chan_is_logical(d40c))
2023		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2024	else
2025		d40c->base->lookup_phy_chans[phy->num] = NULL;
2026
2027	if (d40c->busy) {
2028		pm_runtime_mark_last_busy(d40c->base->dev);
2029		pm_runtime_put_autosuspend(d40c->base->dev);
2030	}
2031
2032	d40c->busy = false;
2033	d40c->phy_chan = NULL;
2034	d40c->configured = false;
2035 mark_last_busy:
2036	pm_runtime_mark_last_busy(d40c->base->dev);
2037	pm_runtime_put_autosuspend(d40c->base->dev);
2038	return res;
2039}
2040
2041static bool d40_is_paused(struct d40_chan *d40c)
2042{
2043	void __iomem *chanbase = chan_base(d40c);
2044	bool is_paused = false;
2045	unsigned long flags;
2046	void __iomem *active_reg;
2047	u32 status;
2048	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2049
2050	spin_lock_irqsave(&d40c->lock, flags);
2051
2052	if (chan_is_physical(d40c)) {
2053		if (d40c->phy_chan->num % 2 == 0)
2054			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2055		else
2056			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2057
2058		status = (readl(active_reg) &
2059			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2060			D40_CHAN_POS(d40c->phy_chan->num);
2061		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2062			is_paused = true;
2063		goto unlock;
2064	}
2065
2066	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2067	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2068		status = readl(chanbase + D40_CHAN_REG_SDLNK);
2069	} else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2070		status = readl(chanbase + D40_CHAN_REG_SSLNK);
2071	} else {
2072		chan_err(d40c, "Unknown direction\n");
2073		goto unlock;
2074	}
2075
2076	status = (status & D40_EVENTLINE_MASK(event)) >>
2077		D40_EVENTLINE_POS(event);
2078
2079	if (status != D40_DMA_RUN)
2080		is_paused = true;
2081 unlock:
2082	spin_unlock_irqrestore(&d40c->lock, flags);
2083	return is_paused;
2085}
2086
2087static u32 stedma40_residue(struct dma_chan *chan)
2088{
2089	struct d40_chan *d40c =
2090		container_of(chan, struct d40_chan, chan);
2091	u32 bytes_left;
2092	unsigned long flags;
2093
2094	spin_lock_irqsave(&d40c->lock, flags);
2095	bytes_left = d40_residue(d40c);
2096	spin_unlock_irqrestore(&d40c->lock, flags);
2097
2098	return bytes_left;
2099}
2100
2101static int
2102d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2103		struct scatterlist *sg_src, struct scatterlist *sg_dst,
2104		unsigned int sg_len, dma_addr_t src_dev_addr,
2105		dma_addr_t dst_dev_addr)
2106{
2107	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2108	struct stedma40_half_channel_info *src_info = &cfg->src_info;
2109	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2110	int ret;
2111
2112	ret = d40_log_sg_to_lli(sg_src, sg_len,
2113				src_dev_addr,
2114				desc->lli_log.src,
2115				chan->log_def.lcsp1,
2116				src_info->data_width,
2117				dst_info->data_width);
	/* Do not let an error from the src conversion be masked below. */
	if (ret < 0)
		return ret;
2118
2119	ret = d40_log_sg_to_lli(sg_dst, sg_len,
2120				dst_dev_addr,
2121				desc->lli_log.dst,
2122				chan->log_def.lcsp3,
2123				dst_info->data_width,
2124				src_info->data_width);
2125
2126	return ret < 0 ? ret : 0;
2127}
2128
2129static int
2130d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2131		struct scatterlist *sg_src, struct scatterlist *sg_dst,
2132		unsigned int sg_len, dma_addr_t src_dev_addr,
2133		dma_addr_t dst_dev_addr)
2134{
2135	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2136	struct stedma40_half_channel_info *src_info = &cfg->src_info;
2137	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2138	unsigned long flags = 0;
2139	int ret;
2140
2141	if (desc->cyclic)
2142		flags |= LLI_CYCLIC | LLI_TERM_INT;
2143
2144	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2145				desc->lli_phy.src,
2146				virt_to_phys(desc->lli_phy.src),
2147				chan->src_def_cfg,
2148				src_info, dst_info, flags);
	/* Do not let an error from the src conversion be masked below. */
	if (ret < 0)
		return ret;
2149
2150	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2151				desc->lli_phy.dst,
2152				virt_to_phys(desc->lli_phy.dst),
2153				chan->dst_def_cfg,
2154				dst_info, src_info, flags);
2155
2156	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2157				   desc->lli_pool.size, DMA_TO_DEVICE);
2158
2159	return ret < 0 ? ret : 0;
2160}
2161
2162static struct d40_desc *
2163d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2164	      unsigned int sg_len, unsigned long dma_flags)
2165{
2166	struct stedma40_chan_cfg *cfg;
2167	struct d40_desc *desc;
2168	int ret;
2169
2170	desc = d40_desc_get(chan);
2171	if (!desc)
2172		return NULL;
2173
2174	cfg = &chan->dma_cfg;
2175	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2176					cfg->dst_info.data_width);
2177	if (desc->lli_len < 0) {
2178		chan_err(chan, "Unaligned size\n");
2179		goto free_desc;
2180	}
2181
2182	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2183	if (ret < 0) {
2184		chan_err(chan, "Could not allocate lli\n");
2185		goto free_desc;
2186	}
2187
2188	desc->lli_current = 0;
2189	desc->txd.flags = dma_flags;
2190	desc->txd.tx_submit = d40_tx_submit;
2191
2192	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2193
2194	return desc;
2195 free_desc:
2196	d40_desc_free(chan, desc);
2197	return NULL;
2198}
2199
2200static struct dma_async_tx_descriptor *
2201d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2202	    struct scatterlist *sg_dst, unsigned int sg_len,
2203	    enum dma_transfer_direction direction, unsigned long dma_flags)
2204{
2205	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2206	dma_addr_t src_dev_addr;
2207	dma_addr_t dst_dev_addr;
2208	struct d40_desc *desc;
2209	unsigned long flags;
2210	int ret;
2211
2212	if (!chan->phy_chan) {
2213		chan_err(chan, "Cannot prepare unallocated channel\n");
2214		return NULL;
2215	}
2216
2217	spin_lock_irqsave(&chan->lock, flags);
2218
2219	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2220	if (desc == NULL)
2221		goto unlock;
2222
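	/*
	 * A scatterlist whose last entry chains back to the first one
	 * marks a cyclic transfer (see dma40_prep_dma_cyclic() below).
	 */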
2223	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2224		desc->cyclic = true;
2225
2226	src_dev_addr = 0;
2227	dst_dev_addr = 0;
2228	if (direction == DMA_DEV_TO_MEM)
2229		src_dev_addr = chan->runtime_addr;
2230	else if (direction == DMA_MEM_TO_DEV)
2231		dst_dev_addr = chan->runtime_addr;
2232
2233	if (chan_is_logical(chan))
2234		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2235				      sg_len, src_dev_addr, dst_dev_addr);
2236	else
2237		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2238				      sg_len, src_dev_addr, dst_dev_addr);
2239
2240	if (ret) {
2241		chan_err(chan, "Failed to prepare %s sg job: %d\n",
2242			 chan_is_logical(chan) ? "log" : "phy", ret);
2243		goto free_desc;
2244	}
2245
2246	/*
2247	 * Add the descriptor to the prepare queue so that it can be
2248	 * freed later in terminate_all.
2249	 */
2250	list_add_tail(&desc->node, &chan->prepare_queue);
2251
2252	spin_unlock_irqrestore(&chan->lock, flags);
2253
2254	return &desc->txd;
2255 free_desc:
2256	d40_desc_free(chan, desc);
2257 unlock:
2258	spin_unlock_irqrestore(&chan->lock, flags);
2259	return NULL;
2260}
2261
2262bool stedma40_filter(struct dma_chan *chan, void *data)
2263{
2264	struct stedma40_chan_cfg *info = data;
2265	struct d40_chan *d40c =
2266		container_of(chan, struct d40_chan, chan);
2267	int err;
2268
2269	if (data) {
2270		err = d40_validate_conf(d40c, info);
2271		if (!err)
2272			d40c->dma_cfg = *info;
2273	} else
2274		err = d40_config_memcpy(d40c);
2275
2276	if (!err)
2277		d40c->configured = true;
2278
2279	return err == 0;
2280}
2281EXPORT_SYMBOL(stedma40_filter);
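/*
 * Illustrative example only: a client driver would typically request a
 * channel by passing a stedma40_chan_cfg through dma_request_channel(),
 * e.g. (with <client event line> standing in for the real dev_type):
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.mode = STEDMA40_MODE_LOGICAL,
 *		.dir = DMA_DEV_TO_MEM,
 *		.dev_type = <client event line>,
 *	};
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */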
2282
2283static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2284{
2285	bool realtime = d40c->dma_cfg.realtime;
2286	bool highprio = d40c->dma_cfg.high_priority;
2287	u32 rtreg;
2288	u32 event = D40_TYPE_TO_EVENT(dev_type);
2289	u32 group = D40_TYPE_TO_GROUP(dev_type);
2290	u32 bit = BIT(event);
2291	u32 prioreg;
2292	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2293
2294	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2295	/*
2296	 * Due to a hardware bug, in some cases a logical channel triggered by
2297	 * a high priority destination event line can generate extra packet
2298	 * transactions.
2299	 *
2300	 * The workaround is to not set the high priority level for the
2301	 * destination event lines that trigger logical channels.
2302	 */
2303	if (!src && chan_is_logical(d40c))
2304		highprio = false;
2305
2306	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2307
2308	/* Destination event lines are stored in the upper halfword */
2309	if (!src)
2310		bit <<= 16;
2311
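	/*
	 * The priority and realtime registers are arrays of one 32-bit
	 * word per event group, hence the group * 4 byte offset.
	 */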
2312	writel(bit, d40c->base->virtbase + prioreg + group * 4);
2313	writel(bit, d40c->base->virtbase + rtreg + group * 4);
2314}
2315
2316static void d40_set_prio_realtime(struct d40_chan *d40c)
2317{
2318	if (d40c->base->rev < 3)
2319		return;
2320
2321	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2322	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2323		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2324
2325	if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2326	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2327		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2328}
2329
2330#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
2331#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
2332#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2333#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2334#define D40_DT_FLAGS_HIGH_PRIO(flags)  ((flags >> 4) & 0x1)
2335
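/*
 * The device tree binding passes three cells per channel request:
 * <dev_type phy_channel flags>, where the flags word is decoded with
 * the D40_DT_FLAGS_* accessors above.
 */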
2336static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2337				  struct of_dma *ofdma)
2338{
2339	struct stedma40_chan_cfg cfg;
2340	dma_cap_mask_t cap;
2341	u32 flags;
2342
2343	memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2344
2345	dma_cap_zero(cap);
2346	dma_cap_set(DMA_SLAVE, cap);
2347
2348	cfg.dev_type = dma_spec->args[0];
2349	flags = dma_spec->args[2];
2350
2351	switch (D40_DT_FLAGS_MODE(flags)) {
2352	case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2353	case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2354	}
2355
2356	switch (D40_DT_FLAGS_DIR(flags)) {
2357	case 0:
2358		cfg.dir = DMA_MEM_TO_DEV;
2359		cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2360		break;
2361	case 1:
2362		cfg.dir = DMA_DEV_TO_MEM;
2363		cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2364		break;
2365	}
2366
2367	if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2368		cfg.phy_channel = dma_spec->args[1];
2369		cfg.use_fixed_channel = true;
2370	}
2371
2372	if (D40_DT_FLAGS_HIGH_PRIO(flags))
2373		cfg.high_priority = true;
2374
2375	return dma_request_channel(cap, stedma40_filter, &cfg);
2376}
2377
2378/* DMA ENGINE functions */
2379static int d40_alloc_chan_resources(struct dma_chan *chan)
2380{
2381	int err;
2382	unsigned long flags;
2383	struct d40_chan *d40c =
2384		container_of(chan, struct d40_chan, chan);
2385	bool is_free_phy;
2386	spin_lock_irqsave(&d40c->lock, flags);
2387
2388	dma_cookie_init(chan);
2389
2390	/* If no DMA configuration is set, use the default configuration (memcpy) */
2391	if (!d40c->configured) {
2392		err = d40_config_memcpy(d40c);
2393		if (err) {
2394			chan_err(d40c, "Failed to configure memcpy channel\n");
2395			goto mark_last_busy;
2396		}
2397	}
2398
2399	err = d40_allocate_channel(d40c, &is_free_phy);
2400	if (err) {
2401		chan_err(d40c, "Failed to allocate channel\n");
2402		d40c->configured = false;
2403		goto mark_last_busy;
2404	}
2405
2406	pm_runtime_get_sync(d40c->base->dev);
2407
2408	d40_set_prio_realtime(d40c);
2409
2410	if (chan_is_logical(d40c)) {
2411		if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2412			d40c->lcpa = d40c->base->lcpa_base +
2413				d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2414		else
2415			d40c->lcpa = d40c->base->lcpa_base +
2416				d40c->dma_cfg.dev_type *
2417				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2418
2419		/* Unmask the Global Interrupt Mask. */
2420		d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2421		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2422	}
2423
2424	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2425		 chan_is_logical(d40c) ? "logical" : "physical",
2426		 d40c->phy_chan->num,
2427		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2428
2430	/*
2431	 * Only write channel configuration to the DMA if the physical
2432	 * resource is free. In case of multiple logical channels
2433	 * on the same physical resource, only the first write is necessary.
2434	 */
2435	if (is_free_phy)
2436		d40_config_write(d40c);
2437 mark_last_busy:
2438	pm_runtime_mark_last_busy(d40c->base->dev);
2439	pm_runtime_put_autosuspend(d40c->base->dev);
2440	spin_unlock_irqrestore(&d40c->lock, flags);
2441	return err;
2442}
2443
2444static void d40_free_chan_resources(struct dma_chan *chan)
2445{
2446	struct d40_chan *d40c =
2447		container_of(chan, struct d40_chan, chan);
2448	int err;
2449	unsigned long flags;
2450
2451	if (d40c->phy_chan == NULL) {
2452		chan_err(d40c, "Cannot free unallocated channel\n");
2453		return;
2454	}
2455
2456	spin_lock_irqsave(&d40c->lock, flags);
2457
2458	err = d40_free_dma(d40c);
2459
2460	if (err)
2461		chan_err(d40c, "Failed to free channel\n");
2462	spin_unlock_irqrestore(&d40c->lock, flags);
2463}
2464
2465static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2466						       dma_addr_t dst,
2467						       dma_addr_t src,
2468						       size_t size,
2469						       unsigned long dma_flags)
2470{
2471	struct scatterlist dst_sg;
2472	struct scatterlist src_sg;
2473
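	/*
	 * Wrap the flat source and destination buffers in single-entry
	 * scatterlists so the common d40_prep_sg() path can be reused.
	 */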
2474	sg_init_table(&dst_sg, 1);
2475	sg_init_table(&src_sg, 1);
2476
2477	sg_dma_address(&dst_sg) = dst;
2478	sg_dma_address(&src_sg) = src;
2479
2480	sg_dma_len(&dst_sg) = size;
2481	sg_dma_len(&src_sg) = size;
2482
2483	return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
2484			   DMA_MEM_TO_MEM, dma_flags);
2485}
2486
2487static struct dma_async_tx_descriptor *
2488d40_prep_memcpy_sg(struct dma_chan *chan,
2489		   struct scatterlist *dst_sg, unsigned int dst_nents,
2490		   struct scatterlist *src_sg, unsigned int src_nents,
2491		   unsigned long dma_flags)
2492{
2493	if (dst_nents != src_nents)
2494		return NULL;
2495
2496	return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
2497			   DMA_MEM_TO_MEM, dma_flags);
2498}
2499
2500static struct dma_async_tx_descriptor *
2501d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2502		  unsigned int sg_len, enum dma_transfer_direction direction,
2503		  unsigned long dma_flags, void *context)
2504{
2505	if (!is_slave_direction(direction))
2506		return NULL;
2507
2508	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2509}
2510
2511static struct dma_async_tx_descriptor *
2512dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2513		     size_t buf_len, size_t period_len,
2514		     enum dma_transfer_direction direction, unsigned long flags)
2515{
2516	unsigned int periods = buf_len / period_len;
2517	struct dma_async_tx_descriptor *txd;
2518	struct scatterlist *sg;
2519	int i;
2520
2521	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2522	if (!sg)
2523		return NULL;
2524
2525	for (i = 0; i < periods; i++) {
2526		sg_dma_address(&sg[i]) = dma_addr;
2527		sg_dma_len(&sg[i]) = period_len;
2528		dma_addr += period_len;
2529	}
2530
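	/*
	 * Chain the extra terminating entry back to the head of the
	 * list: bit 0 of page_link marks a chained entry and bit 1
	 * (the end-of-list marker) is cleared, so the scatterlist
	 * loops back to sg[0] and d40_prep_sg() treats it as cyclic.
	 */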
2531	sg[periods].offset = 0;
2532	sg_dma_len(&sg[periods]) = 0;
2533	sg[periods].page_link =
2534		((unsigned long)sg | 0x01) & ~0x02;
2535
2536	txd = d40_prep_sg(chan, sg, sg, periods, direction,
2537			  DMA_PREP_INTERRUPT);
2538
2539	kfree(sg);
2540
2541	return txd;
2542}
2543
2544static enum dma_status d40_tx_status(struct dma_chan *chan,
2545				     dma_cookie_t cookie,
2546				     struct dma_tx_state *txstate)
2547{
2548	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2549	enum dma_status ret;
2550
2551	if (d40c->phy_chan == NULL) {
2552		chan_err(d40c, "Cannot read status of unallocated channel\n");
2553		return -EINVAL;
2554	}
2555
2556	ret = dma_cookie_status(chan, cookie, txstate);
2557	if (ret != DMA_COMPLETE && txstate)
2558		dma_set_residue(txstate, stedma40_residue(chan));
2559
2560	if (d40_is_paused(d40c))
2561		ret = DMA_PAUSED;
2562
2563	return ret;
2564}
2565
2566static void d40_issue_pending(struct dma_chan *chan)
2567{
2568	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2569	unsigned long flags;
2570
2571	if (d40c->phy_chan == NULL) {
2572		chan_err(d40c, "Channel is not allocated!\n");
2573		return;
2574	}
2575
2576	spin_lock_irqsave(&d40c->lock, flags);
2577
2578	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2579
2580	/* Busy means that queued jobs are already being processed */
2581	if (!d40c->busy)
2582		(void) d40_queue_start(d40c);
2583
2584	spin_unlock_irqrestore(&d40c->lock, flags);
2585}
2586
2587static int d40_terminate_all(struct dma_chan *chan)
2588{
2589	unsigned long flags;
2590	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2591	int ret;
2592
2593	if (d40c->phy_chan == NULL) {
2594		chan_err(d40c, "Channel is not allocated!\n");
2595		return -EINVAL;
2596	}
2597
2598	spin_lock_irqsave(&d40c->lock, flags);
2599
2600	pm_runtime_get_sync(d40c->base->dev);
2601	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2602	if (ret)
2603		chan_err(d40c, "Failed to stop channel\n");
2604
2605	d40_term_all(d40c);
2606	pm_runtime_mark_last_busy(d40c->base->dev);
2607	pm_runtime_put_autosuspend(d40c->base->dev);
2608	if (d40c->busy) {
2609		pm_runtime_mark_last_busy(d40c->base->dev);
2610		pm_runtime_put_autosuspend(d40c->base->dev);
2611	}
2612	d40c->busy = false;
2613
2614	spin_unlock_irqrestore(&d40c->lock, flags);
2615	return 0;
2616}
2617
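/*
 * Round the requested maxburst down to the largest burst size the
 * hardware supports (16, 8, 4 or 1 elements) and encode it as a
 * logical or physical psize value.
 */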
2618static int
2619dma40_config_to_halfchannel(struct d40_chan *d40c,
2620			    struct stedma40_half_channel_info *info,
2621			    u32 maxburst)
2622{
2623	int psize;
2624
2625	if (chan_is_logical(d40c)) {
2626		if (maxburst >= 16)
2627			psize = STEDMA40_PSIZE_LOG_16;
2628		else if (maxburst >= 8)
2629			psize = STEDMA40_PSIZE_LOG_8;
2630		else if (maxburst >= 4)
2631			psize = STEDMA40_PSIZE_LOG_4;
2632		else
2633			psize = STEDMA40_PSIZE_LOG_1;
2634	} else {
2635		if (maxburst >= 16)
2636			psize = STEDMA40_PSIZE_PHY_16;
2637		else if (maxburst >= 8)
2638			psize = STEDMA40_PSIZE_PHY_8;
2639		else if (maxburst >= 4)
2640			psize = STEDMA40_PSIZE_PHY_4;
2641		else
2642			psize = STEDMA40_PSIZE_PHY_1;
2643	}
2644
2645	info->psize = psize;
2646	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2647
2648	return 0;
2649}
2650
2651/* Runtime reconfiguration extension */
2652static int d40_set_runtime_config(struct dma_chan *chan,
2653				  struct dma_slave_config *config)
2654{
2655	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2656	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2657	enum dma_slave_buswidth src_addr_width, dst_addr_width;
2658	dma_addr_t config_addr;
2659	u32 src_maxburst, dst_maxburst;
2660	int ret;
2661
2662	if (d40c->phy_chan == NULL) {
2663		chan_err(d40c, "Channel is not allocated!\n");
2664		return -EINVAL;
2665	}
2666
2667	src_addr_width = config->src_addr_width;
2668	src_maxburst = config->src_maxburst;
2669	dst_addr_width = config->dst_addr_width;
2670	dst_maxburst = config->dst_maxburst;
2671
2672	if (config->direction == DMA_DEV_TO_MEM) {
2673		config_addr = config->src_addr;
2674
2675		if (cfg->dir != DMA_DEV_TO_MEM)
2676			dev_dbg(d40c->base->dev,
2677				"channel was not configured for peripheral "
2678				"to memory transfer (%d) overriding\n",
2679				cfg->dir);
2680		cfg->dir = DMA_DEV_TO_MEM;
2681
2682		/* Configure the memory side */
2683		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2684			dst_addr_width = src_addr_width;
2685		if (dst_maxburst == 0)
2686			dst_maxburst = src_maxburst;
2687
2688	} else if (config->direction == DMA_MEM_TO_DEV) {
2689		config_addr = config->dst_addr;
2690
2691		if (cfg->dir != DMA_MEM_TO_DEV)
2692			dev_dbg(d40c->base->dev,
2693				"channel was not configured for memory "
2694				"to peripheral transfer (%d) overriding\n",
2695				cfg->dir);
2696		cfg->dir = DMA_MEM_TO_DEV;
2697
2698		/* Configure the memory side */
2699		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2700			src_addr_width = dst_addr_width;
2701		if (src_maxburst == 0)
2702			src_maxburst = dst_maxburst;
2703	} else {
2704		dev_err(d40c->base->dev,
2705			"unrecognized channel direction %d\n",
2706			config->direction);
2707		return -EINVAL;
2708	}
2709
2710	if (config_addr <= 0) {
2711		dev_err(d40c->base->dev, "no address supplied\n");
2712		return -EINVAL;
2713	}
2714
2715	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2716		dev_err(d40c->base->dev,
2717			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2718			src_maxburst,
2719			src_addr_width,
2720			dst_maxburst,
2721			dst_addr_width);
2722		return -EINVAL;
2723	}
2724
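	/*
	 * The hardware bursts at most 16 elements; clamp one side and
	 * rescale the other so both sides still transfer the same
	 * number of bytes per burst.
	 */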
2725	if (src_maxburst > 16) {
2726		src_maxburst = 16;
2727		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2728	} else if (dst_maxburst > 16) {
2729		dst_maxburst = 16;
2730		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2731	}
2732
2733	/* The only valid widths are 1, 2, 4 and 8 bytes. */
2734	if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2735	    src_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2736	    dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2737	    dst_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2738	    !is_power_of_2(src_addr_width) ||
2739	    !is_power_of_2(dst_addr_width))
2740		return -EINVAL;
2741
2742	cfg->src_info.data_width = src_addr_width;
2743	cfg->dst_info.data_width = dst_addr_width;
2744
2745	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2746					  src_maxburst);
2747	if (ret)
2748		return ret;
2749
2750	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2751					  dst_maxburst);
2752	if (ret)
2753		return ret;
2754
2755	/* Fill in register values */
2756	if (chan_is_logical(d40c))
2757		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2758	else
2759		d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2760
2761	/* These settings will take precedence later */
2762	d40c->runtime_addr = config_addr;
2763	d40c->runtime_direction = config->direction;
2764	dev_dbg(d40c->base->dev,
2765		"configured channel %s for %s, data width %d/%d, "
2766		"maxburst %d/%d elements, LE, no flow control\n",
2767		dma_chan_name(chan),
2768		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2769		src_addr_width, dst_addr_width,
2770		src_maxburst, dst_maxburst);
2771
2772	return 0;
2773}
2774
2775/* Initialization functions */
2776
2777static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2778				 struct d40_chan *chans, int offset,
2779				 int num_chans)
2780{
2781	int i = 0;
2782	struct d40_chan *d40c;
2783
2784	INIT_LIST_HEAD(&dma->channels);
2785
2786	for (i = offset; i < offset + num_chans; i++) {
2787		d40c = &chans[i];
2788		d40c->base = base;
2789		d40c->chan.device = dma;
2790
2791		spin_lock_init(&d40c->lock);
2792
2793		d40c->log_num = D40_PHY_CHAN;
2794
2795		INIT_LIST_HEAD(&d40c->done);
2796		INIT_LIST_HEAD(&d40c->active);
2797		INIT_LIST_HEAD(&d40c->queue);
2798		INIT_LIST_HEAD(&d40c->pending_queue);
2799		INIT_LIST_HEAD(&d40c->client);
2800		INIT_LIST_HEAD(&d40c->prepare_queue);
2801
2802		tasklet_init(&d40c->tasklet, dma_tasklet,
2803			     (unsigned long) d40c);
2804
2805		list_add_tail(&d40c->chan.device_node,
2806			      &dma->channels);
2807	}
2808}
2809
2810static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2811{
2812	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2813		dev->device_prep_slave_sg = d40_prep_slave_sg;
2814
2815	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2816		dev->device_prep_dma_memcpy = d40_prep_memcpy;
2817
2818		/*
2819		 * This controller can only access addresses at even
2820		 * 32-bit boundaries, i.e. aligned to 2^2 bytes.
2821		 */
2822		dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
2823	}
2824
2825	if (dma_has_cap(DMA_SG, dev->cap_mask))
2826		dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2827
2828	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2829		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2830
2831	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2832	dev->device_free_chan_resources = d40_free_chan_resources;
2833	dev->device_issue_pending = d40_issue_pending;
2834	dev->device_tx_status = d40_tx_status;
2835	dev->device_config = d40_set_runtime_config;
2836	dev->device_pause = d40_pause;
2837	dev->device_resume = d40_resume;
2838	dev->device_terminate_all = d40_terminate_all;
2839	dev->dev = base->dev;
2840}
2841
2842static int __init d40_dmaengine_init(struct d40_base *base,
2843				     int num_reserved_chans)
2844{
2845	int err;
2846
2847	d40_chan_init(base, &base->dma_slave, base->log_chans,
2848		      0, base->num_log_chans);
2849
2850	dma_cap_zero(base->dma_slave.cap_mask);
2851	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2852	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2853
2854	d40_ops_init(base, &base->dma_slave);
2855
2856	err = dma_async_device_register(&base->dma_slave);
2857
2858	if (err) {
2859		d40_err(base->dev, "Failed to register slave channels\n");
2860		goto exit;
2861	}
2862
2863	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2864		      base->num_log_chans, base->num_memcpy_chans);
2865
2866	dma_cap_zero(base->dma_memcpy.cap_mask);
2867	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2868	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2869
2870	d40_ops_init(base, &base->dma_memcpy);
2871
2872	err = dma_async_device_register(&base->dma_memcpy);
2873
2874	if (err) {
2875		d40_err(base->dev,
2876			"Failed to register memcpy only channels\n");
2877		goto unregister_slave;
2878	}
2879
2880	d40_chan_init(base, &base->dma_both, base->phy_chans,
2881		      0, num_reserved_chans);
2882
2883	dma_cap_zero(base->dma_both.cap_mask);
2884	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2885	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2886	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2887	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2888
2889	d40_ops_init(base, &base->dma_both);
2890	err = dma_async_device_register(&base->dma_both);
2891
2892	if (err) {
2893		d40_err(base->dev,
2894			"Failed to register logical and physical capable channels\n");
2895		goto unregister_memcpy;
2896	}
2897	return 0;
2898 unregister_memcpy:
2899	dma_async_device_unregister(&base->dma_memcpy);
2900 unregister_slave:
2901	dma_async_device_unregister(&base->dma_slave);
2902 exit:
2903	return err;
2904}
2905
2906/* Suspend/resume functionality */
2907#ifdef CONFIG_PM_SLEEP
2908static int dma40_suspend(struct device *dev)
2909{
2910	struct platform_device *pdev = to_platform_device(dev);
2911	struct d40_base *base = platform_get_drvdata(pdev);
2912	int ret;
2913
2914	ret = pm_runtime_force_suspend(dev);
2915	if (ret)
2916		return ret;
2917
2918	if (base->lcpa_regulator)
2919		ret = regulator_disable(base->lcpa_regulator);
2920	return ret;
2921}
2922
2923static int dma40_resume(struct device *dev)
2924{
2925	struct platform_device *pdev = to_platform_device(dev);
2926	struct d40_base *base = platform_get_drvdata(pdev);
2927	int ret = 0;
2928
2929	if (base->lcpa_regulator) {
2930		ret = regulator_enable(base->lcpa_regulator);
2931		if (ret)
2932			return ret;
2933	}
2934
2935	return pm_runtime_force_resume(dev);
2936}
2937#endif
2938
2939#ifdef CONFIG_PM
2940static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2941			 u32 *regaddr, int num, bool save)
2942{
2943	int i;
2944
2945	for (i = 0; i < num; i++) {
2946		void __iomem *addr = baseaddr + regaddr[i];
2947
2948		if (save)
2949			backup[i] = readl_relaxed(addr);
2950		else
2951			writel_relaxed(backup[i], addr);
2952	}
2953}
2954
2955static void d40_save_restore_registers(struct d40_base *base, bool save)
2956{
2957	int i;
2958
2959	/* Save/Restore channel specific registers */
2960	for (i = 0; i < base->num_phy_chans; i++) {
2961		void __iomem *addr;
2962		int idx;
2963
2964		if (base->phy_res[i].reserved)
2965			continue;
2966
2967		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2968		idx = i * ARRAY_SIZE(d40_backup_regs_chan);
2969
2970		dma40_backup(addr, &base->reg_val_backup_chan[idx],
2971			     d40_backup_regs_chan,
2972			     ARRAY_SIZE(d40_backup_regs_chan),
2973			     save);
2974	}
2975
2976	/* Save/Restore global registers */
2977	dma40_backup(base->virtbase, base->reg_val_backup,
2978		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
2979		     save);
2980
2981	/* Save/Restore registers only existing on dma40 v3 and later */
2982	if (base->gen_dmac.backup)
2983		dma40_backup(base->virtbase, base->reg_val_backup_v4,
2984			     base->gen_dmac.backup,
2985			base->gen_dmac.backup_size,
2986			save);
2987}
2988
2989static int dma40_runtime_suspend(struct device *dev)
2990{
2991	struct platform_device *pdev = to_platform_device(dev);
2992	struct d40_base *base = platform_get_drvdata(pdev);
2993
2994	d40_save_restore_registers(base, true);
2995
2996	/* Don't disable/enable clocks for v1 due to HW bugs */
2997	if (base->rev != 1)
2998		writel_relaxed(base->gcc_pwr_off_mask,
2999			       base->virtbase + D40_DREG_GCC);
3000
3001	return 0;
3002}
3003
3004static int dma40_runtime_resume(struct device *dev)
3005{
3006	struct platform_device *pdev = to_platform_device(dev);
3007	struct d40_base *base = platform_get_drvdata(pdev);
3008
3009	d40_save_restore_registers(base, false);
3010
3011	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3012		       base->virtbase + D40_DREG_GCC);
3013	return 0;
3014}
3015#endif
3016
3017static const struct dev_pm_ops dma40_pm_ops = {
3018	SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3019	SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3020				dma40_runtime_resume,
3021				NULL)
3022};
3023
3024/* Initialization functions. */
3025
3026static int __init d40_phy_res_init(struct d40_base *base)
3027{
3028	int i;
3029	int num_phy_chans_avail = 0;
3030	u32 val[2];
3031	int odd_even_bit = -2;
3032	int gcc = D40_DREG_GCC_ENA;
3033
3034	val[0] = readl(base->virtbase + D40_DREG_PRSME);
3035	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3036
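	/*
	 * PRSME/PRSMO hold two mode bits per physical channel, with
	 * even-numbered channels in PRSME and odd-numbered ones in
	 * PRSMO; odd_even_bit advances by two on every even channel so
	 * the same bit offset is reused for the following odd channel.
	 */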
3037	for (i = 0; i < base->num_phy_chans; i++) {
3038		base->phy_res[i].num = i;
3039		odd_even_bit += 2 * ((i % 2) == 0);
3040		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3041			/* Mark security only channels as occupied */
3042			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3043			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3044			base->phy_res[i].reserved = true;
3045			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3046						       D40_DREG_GCC_SRC);
3047			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3048						       D40_DREG_GCC_DST);
3051		} else {
3052			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3053			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3054			base->phy_res[i].reserved = false;
3055			num_phy_chans_avail++;
3056		}
3057		spin_lock_init(&base->phy_res[i].lock);
3058	}
3059
3060	/* Mark disabled channels as occupied */
3061	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3062		int chan = base->plat_data->disabled_channels[i];
3063
3064		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3065		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3066		base->phy_res[chan].reserved = true;
3067		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3068					       D40_DREG_GCC_SRC);
3069		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3070					       D40_DREG_GCC_DST);
3071		num_phy_chans_avail--;
3072	}
3073
3074	/* Mark soft_lli channels */
3075	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3076		int chan = base->plat_data->soft_lli_chans[i];
3077
3078		base->phy_res[chan].use_soft_lli = true;
3079	}
3080
3081	dev_info(base->dev, "%d of %d physical DMA channels available\n",
3082		 num_phy_chans_avail, base->num_phy_chans);
3083
3084	/* Verify settings extended vs standard */
3085	val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3086
3087	for (i = 0; i < base->num_phy_chans; i++) {
3088
3089		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3090		    (val[0] & 0x3) != 1)
3091			dev_info(base->dev,
3092				 "[%s] INFO: channel %d is misconfigured (%d)\n",
3093				 __func__, i, val[0] & 0x3);
3094
3095		val[0] = val[0] >> 2;
3096	}
3097
3098	/*
3099	 * To keep things simple, enable all clocks initially.
3100	 * The clocks will be managed later, after channel allocation.
3101	 * The clocks for the event lines on which reserved channels
3102	 * exist are not managed here.
3103	 */
3104	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3105	base->gcc_pwr_off_mask = gcc;
3106
3107	return num_phy_chans_avail;
3108}
3109
3110static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3111{
3112	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3113	struct clk *clk;
3114	void __iomem *virtbase;
3115	struct resource *res;
3116	struct d40_base *base;
3117	int num_log_chans;
3118	int num_phy_chans;
3119	int num_memcpy_chans;
3120	int clk_ret = -EINVAL;
3121	int i;
3122	u32 pid;
3123	u32 cid;
3124	u8 rev;
3125
3126	clk = clk_get(&pdev->dev, NULL);
3127	if (IS_ERR(clk)) {
3128		d40_err(&pdev->dev, "No matching clock found\n");
3129		goto check_prepare_enabled;
3130	}
3131
3132	clk_ret = clk_prepare_enable(clk);
3133	if (clk_ret) {
3134		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3135		goto disable_unprepare;
3136	}
3137
3138	/* Get IO for DMAC base address */
3139	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3140	if (!res)
3141		goto disable_unprepare;
3142
3143	if (request_mem_region(res->start, resource_size(res),
3144			       D40_NAME " I/O base") == NULL)
3145		goto release_region;
3146
3147	virtbase = ioremap(res->start, resource_size(res));
3148	if (!virtbase)
3149		goto release_region;
3150
3151	/* This is just a regular AMBA PrimeCell ID actually */
3152	for (pid = 0, i = 0; i < 4; i++)
3153		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3154			& 255) << (i * 8);
3155	for (cid = 0, i = 0; i < 4; i++)
3156		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3157			& 255) << (i * 8);
3158
3159	if (cid != AMBA_CID) {
3160		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3161		goto unmap_io;
3162	}
3163	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3164		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3165			AMBA_MANF_BITS(pid),
3166			AMBA_VENDOR_ST);
3167		goto unmap_io;
3168	}
3169	/*
3170	 * HW revision:
3171	 * DB8500ed has revision 0
3172	 * ? has revision 1
3173	 * DB8500v1 has revision 2
3174	 * DB8500v2 has revision 3
3175	 * AP9540v1 has revision 4
3176	 * DB8540v1 has revision 4
3177	 */
3178	rev = AMBA_REV_BITS(pid);
3179	if (rev < 2) {
3180		d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
3181		goto unmap_io;
3182	}
3183
3184	/* The number of physical channels on this HW */
3185	if (plat_data->num_of_phy_chans)
3186		num_phy_chans = plat_data->num_of_phy_chans;
3187	else
3188		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3189
3190	/* The number of channels used for memcpy */
3191	if (plat_data->num_of_memcpy_chans)
3192		num_memcpy_chans = plat_data->num_of_memcpy_chans;
3193	else
3194		num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3195
3196	num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3197
3198	dev_info(&pdev->dev,
3199		 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3200		 rev, &res->start, num_phy_chans, num_log_chans);
3201
3202	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3203		       (num_phy_chans + num_log_chans + num_memcpy_chans) *
3204		       sizeof(struct d40_chan), GFP_KERNEL);
3205
3206	if (base == NULL)
3207		goto unmap_io;
3208
3209	base->rev = rev;
3210	base->clk = clk;
3211	base->num_memcpy_chans = num_memcpy_chans;
3212	base->num_phy_chans = num_phy_chans;
3213	base->num_log_chans = num_log_chans;
3214	base->phy_start = res->start;
3215	base->phy_size = resource_size(res);
3216	base->virtbase = virtbase;
3217	base->plat_data = plat_data;
3218	base->dev = &pdev->dev;
3219	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3220	base->log_chans = &base->phy_chans[num_phy_chans];
3221
3222	if (base->plat_data->num_of_phy_chans == 14) {
3223		base->gen_dmac.backup = d40_backup_regs_v4b;
3224		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3225		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3226		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3227		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3228		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3229		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3230		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3231		base->gen_dmac.il = il_v4b;
3232		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3233		base->gen_dmac.init_reg = dma_init_reg_v4b;
3234		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3235	} else {
3236		if (base->rev >= 3) {
3237			base->gen_dmac.backup = d40_backup_regs_v4a;
3238			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3239		}
3240		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3241		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3242		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3243		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3244		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3245		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3246		base->gen_dmac.il = il_v4a;
3247		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3248		base->gen_dmac.init_reg = dma_init_reg_v4a;
3249		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3250	}
3251
3252	base->phy_res = kcalloc(num_phy_chans,
3253				sizeof(*base->phy_res),
3254				GFP_KERNEL);
3255	if (!base->phy_res)
3256		goto free_base;
3257
3258	base->lookup_phy_chans = kcalloc(num_phy_chans,
3259					 sizeof(*base->lookup_phy_chans),
3260					 GFP_KERNEL);
3261	if (!base->lookup_phy_chans)
3262		goto free_phy_res;
3263
3264	base->lookup_log_chans = kcalloc(num_log_chans,
3265					 sizeof(*base->lookup_log_chans),
3266					 GFP_KERNEL);
3267	if (!base->lookup_log_chans)
3268		goto free_phy_chans;
3269
3270	base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3271						  sizeof(d40_backup_regs_chan),
3272						  GFP_KERNEL);
3273	if (!base->reg_val_backup_chan)
3274		goto free_log_chans;
3275
3276	base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3277					    * D40_LCLA_LINK_PER_EVENT_GRP,
3278					    sizeof(*base->lcla_pool.alloc_map),
3279					    GFP_KERNEL);
3280	if (!base->lcla_pool.alloc_map)
3281		goto free_backup_chan;
3282
3283	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3284					    0, SLAB_HWCACHE_ALIGN,
3285					    NULL);
3286	if (base->desc_slab == NULL)
3287		goto free_map;
3288
3289	return base;
3290 free_map:
3291	kfree(base->lcla_pool.alloc_map);
3292 free_backup_chan:
3293	kfree(base->reg_val_backup_chan);
3294 free_log_chans:
3295	kfree(base->lookup_log_chans);
3296 free_phy_chans:
3297	kfree(base->lookup_phy_chans);
3298 free_phy_res:
3299	kfree(base->phy_res);
3300 free_base:
3301	kfree(base);
3302 unmap_io:
3303	iounmap(virtbase);
3304 release_region:
3305	release_mem_region(res->start, resource_size(res));
3306 check_prepare_enabled:
3307	if (!clk_ret)
3308 disable_unprepare:
3309		clk_disable_unprepare(clk);
3310	if (!IS_ERR(clk))
3311		clk_put(clk);
3312	return NULL;
3313}
3314
3315static void __init d40_hw_init(struct d40_base *base)
3316{
3318	int i;
3319	u32 prmseo[2] = {0, 0};
3320	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3321	u32 pcmis = 0;
3322	u32 pcicr = 0;
3323	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3324	u32 reg_size = base->gen_dmac.init_reg_size;
3325
3326	for (i = 0; i < reg_size; i++)
3327		writel(dma_init_reg[i].val,
3328		       base->virtbase + dma_init_reg[i].reg);
3329
3330	/* Configure all our dma channels to default settings */
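	/*
	 * ACTIVE/ACTIVO and PRMSE/PRMSO likewise hold two bits per
	 * physical channel, split between even and odd channels, so
	 * the masks below are built two bits at a time while walking
	 * the channels from highest to lowest.
	 */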
3331	for (i = 0; i < base->num_phy_chans; i++) {
3332
3333		activeo[i % 2] = activeo[i % 2] << 2;
3334
3335		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3336		    == D40_ALLOC_PHY) {
3337			activeo[i % 2] |= 3;
3338			continue;
3339		}
3340
3341		/* Enable interrupt # */
3342		pcmis = (pcmis << 1) | 1;
3343
3344		/* Clear interrupt # */
3345		pcicr = (pcicr << 1) | 1;
3346
3347		/* Set channel to physical mode */
3348		prmseo[i % 2] = prmseo[i % 2] << 2;
3349		prmseo[i % 2] |= 1;
3350
3351	}
3352
3353	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3354	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3355	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3356	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3357
3358	/* Write which interrupt to enable */
3359	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3360
3361	/* Write which interrupt to clear */
3362	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3363
3364	/* These are __initdata and cannot be accessed after init */
3365	base->gen_dmac.init_reg = NULL;
3366	base->gen_dmac.init_reg_size = 0;
3367}
3368
3369static int __init d40_lcla_allocate(struct d40_base *base)
3370{
3371	struct d40_lcla_pool *pool = &base->lcla_pool;
3372	unsigned long *page_list;
3373	int i, j;
3374	int ret;
3375
3376	/*
3377	 * This is somewhat ugly. We need 8192 bytes that are 18-bit
3378	 * aligned. To fulfil this hardware requirement without wasting
3379	 * 256 KiB, we allocate pages until we get an aligned one.
3380	 */
3381	page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3382				  sizeof(*page_list),
3383				  GFP_KERNEL);
3384	if (!page_list)
3385		return -ENOMEM;
3386
3387	/* Calculate how many pages are required */
3388	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3389
3390	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3391		page_list[i] = __get_free_pages(GFP_KERNEL,
3392						base->lcla_pool.pages);
3393		if (!page_list[i]) {
3394
3395			d40_err(base->dev, "Failed to allocate %d pages.\n",
3396				base->lcla_pool.pages);
3397			ret = -ENOMEM;
3398
3399			for (j = 0; j < i; j++)
3400				free_pages(page_list[j], base->lcla_pool.pages);
3401			goto free_page_list;
3402		}
3403
3404		if ((virt_to_phys((void *)page_list[i]) &
3405		     (LCLA_ALIGNMENT - 1)) == 0)
3406			break;
3407	}
3408
3409	for (j = 0; j < i; j++)
3410		free_pages(page_list[j], base->lcla_pool.pages);
3411
3412	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3413		base->lcla_pool.base = (void *)page_list[i];
3414	} else {
3415		/*
3416		 * After many attempts with no success in finding the
3417		 * correct alignment, fall back to allocating a big buffer.
3418		 */
3419		dev_warn(base->dev,
3420			 "[%s] Failed to get %d pages @ 18 bit align.\n",
3421			 __func__, base->lcla_pool.pages);
3422		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3423							 base->num_phy_chans +
3424							 LCLA_ALIGNMENT,
3425							 GFP_KERNEL);
3426		if (!base->lcla_pool.base_unaligned) {
3427			ret = -ENOMEM;
3428			goto free_page_list;
3429		}
3430
3431		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3432						 LCLA_ALIGNMENT);
3433	}
3434
3435	pool->dma_addr = dma_map_single(base->dev, pool->base,
3436					SZ_1K * base->num_phy_chans,
3437					DMA_TO_DEVICE);
3438	if (dma_mapping_error(base->dev, pool->dma_addr)) {
3439		pool->dma_addr = 0;
3440		ret = -ENOMEM;
3441		goto free_page_list;
3442	}
3443
3444	writel(virt_to_phys(base->lcla_pool.base),
3445	       base->virtbase + D40_DREG_LCLA);
3446	ret = 0;
3447 free_page_list:
3448	kfree(page_list);
3449	return ret;
3450}
3451
3452static int __init d40_of_probe(struct platform_device *pdev,
3453			       struct device_node *np)
3454{
3455	struct stedma40_platform_data *pdata;
3456	int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3457	const __be32 *list;
3458
3459	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3460	if (!pdata)
3461		return -ENOMEM;
3462
3463	/* If absent, this value will be obtained from the hardware. */
3464	of_property_read_u32(np, "dma-channels", &num_phy);
3465	if (num_phy > 0)
3466		pdata->num_of_phy_chans = num_phy;
3467
3468	list = of_get_property(np, "memcpy-channels", &num_memcpy);
3469	num_memcpy /= sizeof(*list);
3470
3471	if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3472		d40_err(&pdev->dev,
3473			"Invalid number of memcpy channels specified (%d)\n",
3474			num_memcpy);
3475		return -EINVAL;
3476	}
3477	pdata->num_of_memcpy_chans = num_memcpy;
3478
3479	of_property_read_u32_array(np, "memcpy-channels",
3480				   dma40_memcpy_channels,
3481				   num_memcpy);
3482
3483	list = of_get_property(np, "disabled-channels", &num_disabled);
3484	num_disabled /= sizeof(*list);
3485
3486	if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3487		d40_err(&pdev->dev,
3488			"Invalid number of disabled channels specified (%d)\n",
3489			num_disabled);
3490		return -EINVAL;
3491	}
3492
3493	of_property_read_u32_array(np, "disabled-channels",
3494				   pdata->disabled_channels,
3495				   num_disabled);
3496	pdata->disabled_channels[num_disabled] = -1;
3497
3498	pdev->dev.platform_data = pdata;
3499
3500	return 0;
3501}
3502
3503static int __init d40_probe(struct platform_device *pdev)
3504{
3505	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3506	struct device_node *np = pdev->dev.of_node;
3507	int ret = -ENOENT;
3508	struct d40_base *base;
3509	struct resource *res;
3510	int num_reserved_chans;
3511	u32 val;
3512
3513	if (!plat_data) {
3514		if (np) {
3515			if (d40_of_probe(pdev, np)) {
3516				ret = -ENOMEM;
3517				goto report_failure;
3518			}
3519		} else {
3520			d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3521			goto report_failure;
3522		}
3523	}
3524
3525	base = d40_hw_detect_init(pdev);
3526	if (!base)
3527		goto report_failure;
3528
3529	num_reserved_chans = d40_phy_res_init(base);
3530
3531	platform_set_drvdata(pdev, base);
3532
3533	spin_lock_init(&base->interrupt_lock);
3534	spin_lock_init(&base->execmd_lock);
3535
3536	/* Get IO for logical channel parameter address */
3537	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3538	if (!res) {
3539		ret = -ENOENT;
3540		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3541		goto destroy_cache;
3542	}
3543	base->lcpa_size = resource_size(res);
3544	base->phy_lcpa = res->start;
3545
3546	if (request_mem_region(res->start, resource_size(res),
3547			       D40_NAME " I/O lcpa") == NULL) {
3548		ret = -EBUSY;
3549		d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3550		goto destroy_cache;
3551	}
3552
3553	/* We make use of ESRAM memory for this. */
3554	val = readl(base->virtbase + D40_DREG_LCPA);
3555	if (res->start != val && val != 0) {
3556		dev_warn(&pdev->dev,
3557			 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3558			 __func__, val, &res->start);
3559	} else
3560		writel(res->start, base->virtbase + D40_DREG_LCPA);
3561
3562	base->lcpa_base = ioremap(res->start, resource_size(res));
3563	if (!base->lcpa_base) {
3564		ret = -ENOMEM;
3565		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3566		goto destroy_cache;
3567	}
3568	/* If lcla has to be located in ESRAM we don't need to allocate */
3569	if (base->plat_data->use_esram_lcla) {
3570		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3571							"lcla_esram");
3572		if (!res) {
3573			ret = -ENOENT;
3574			d40_err(&pdev->dev,
3575				"No \"lcla_esram\" memory resource\n");
3576			goto destroy_cache;
3577		}
3578		base->lcla_pool.base = ioremap(res->start,
3579						resource_size(res));
3580		if (!base->lcla_pool.base) {
3581			ret = -ENOMEM;
3582			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3583			goto destroy_cache;
3584		}
3585		writel(res->start, base->virtbase + D40_DREG_LCLA);
3586
3587	} else {
3588		ret = d40_lcla_allocate(base);
3589		if (ret) {
3590			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3591			goto destroy_cache;
3592		}
3593	}
3594
3595	spin_lock_init(&base->lcla_pool.lock);
3596
3597	base->irq = platform_get_irq(pdev, 0);
3598
3599	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3600	if (ret) {
3601		d40_err(&pdev->dev, "No IRQ defined\n");
3602		goto destroy_cache;
3603	}
3604
3605	if (base->plat_data->use_esram_lcla) {
3606
3607		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3608		if (IS_ERR(base->lcpa_regulator)) {
3609			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3610			ret = PTR_ERR(base->lcpa_regulator);
3611			base->lcpa_regulator = NULL;
3612			goto destroy_cache;
3613		}
3614
3615		ret = regulator_enable(base->lcpa_regulator);
3616		if (ret) {
3617			d40_err(&pdev->dev,
3618				"Failed to enable lcpa_regulator\n");
3619			regulator_put(base->lcpa_regulator);
3620			base->lcpa_regulator = NULL;
3621			goto destroy_cache;
3622		}
3623	}
3624
3625	writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3626
3627	pm_runtime_irq_safe(base->dev);
3628	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3629	pm_runtime_use_autosuspend(base->dev);
3630	pm_runtime_mark_last_busy(base->dev);
3631	pm_runtime_set_active(base->dev);
3632	pm_runtime_enable(base->dev);
3633
3634	ret = d40_dmaengine_init(base, num_reserved_chans);
3635	if (ret)
3636		goto destroy_cache;
3637
3638	base->dev->dma_parms = &base->dma_parms;
3639	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3640	if (ret) {
3641		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3642		goto destroy_cache;
3643	}
3644
3645	d40_hw_init(base);
3646
3647	if (np) {
3648		ret = of_dma_controller_register(np, d40_xlate, NULL);
3649		if (ret)
3650			dev_err(&pdev->dev,
3651				"could not register of_dma_controller\n");
3652	}
3653
3654	dev_info(base->dev, "initialized\n");
3655	return 0;
3656 destroy_cache:
3657	kmem_cache_destroy(base->desc_slab);
3658	if (base->virtbase)
3659		iounmap(base->virtbase);
3660
3661	if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3662		iounmap(base->lcla_pool.base);
3663		base->lcla_pool.base = NULL;
3664	}
3665
3666	if (base->lcla_pool.dma_addr)
3667		dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3668				 SZ_1K * base->num_phy_chans,
3669				 DMA_TO_DEVICE);
3670
3671	if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3672		free_pages((unsigned long)base->lcla_pool.base,
3673			   base->lcla_pool.pages);
3674
3675	kfree(base->lcla_pool.base_unaligned);
3676
3677	if (base->phy_lcpa)
3678		release_mem_region(base->phy_lcpa,
3679				   base->lcpa_size);
3680	if (base->phy_start)
3681		release_mem_region(base->phy_start,
3682				   base->phy_size);
3683	if (base->clk) {
3684		clk_disable_unprepare(base->clk);
3685		clk_put(base->clk);
3686	}
3687
3688	if (base->lcpa_regulator) {
3689		regulator_disable(base->lcpa_regulator);
3690		regulator_put(base->lcpa_regulator);
3691	}
3692
3693	kfree(base->lcla_pool.alloc_map);
3694	kfree(base->lookup_log_chans);
3695	kfree(base->lookup_phy_chans);
3696	kfree(base->phy_res);
3697	kfree(base);
3698 report_failure:
3699	d40_err(&pdev->dev, "probe failed\n");
3700	return ret;
3701}
3702
3703static const struct of_device_id d40_match[] = {
3704	{ .compatible = "stericsson,dma40", },
3705	{}
3706};
3707
3708static struct platform_driver d40_driver = {
3709	.driver = {
3710		.name  = D40_NAME,
3711		.pm = &dma40_pm_ops,
3712		.of_match_table = d40_match,
3713	},
3714};
3715
3716static int __init stedma40_init(void)
3717{
3718	return platform_driver_probe(&d40_driver, d40_probe);
3719}
3720subsys_initcall(stedma40_init);