/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/amba/bus.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

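/*
 * Worked example (illustrative, not in the original source): physical
 * channels 4 and 5 both map to bit position 2 * (4 / 2) = 2 * (5 / 2) = 4,
 * i.e. mask 0x30.  The pair is told apart by register instead: even
 * channels are controlled through D40_DREG_ACTIVE and odd channels through
 * D40_DREG_ACTIVO (see d40_channel_execute_command() below).
 */
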
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts made before giving up on getting pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	dma_addr_t	dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	dma_addr;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	struct d40_desc	**alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};

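/*
 * Illustrative note (not in the original source): allocated_src and
 * allocated_dst above form a small state machine.  D40_ALLOC_FREE (bit 31)
 * marks a completely free half channel, D40_ALLOC_PHY (bit 30) marks it
 * taken by a physical channel, and in logical mode the low bits record,
 * one per event line, which logical channels are mapped here; see
 * d40_alloc_mask_set() further down.
 */
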
struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @completed: Starts with 1, after first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending().
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 queue;
	struct list_head		 prepare_queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			runtime_addr;
	enum dma_data_direction		runtime_direction;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			 interrupt_lock;
	spinlock_t			 execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}

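/*
 * Illustrative note (not in the original source): for the common single
 * src/dst pair (lli_len == 1) the descriptor's embedded pre_alloc_lli
 * buffer of 3 * sizeof(struct d40_phy_lli) is used, leaving one entry of
 * slack for alignment; longer jobs fall back to a kmalloc'd area of
 * lli_len * 2 * align bytes plus alignment headroom.
 */
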
static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time; therefore each half
	 * starts at 1, since entry 0 can't be used because zero is the end
	 * marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
						    D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
							D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space.
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		curr_lcla = d40_lcla_alloc_one(chan, desc);
		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first.  However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		dma_sync_single_range_for_device(chan->base->dev,
					pool->dma_addr, lcla_offset,
					2 * sizeof(struct d40_log_lli),
					DMA_TO_DEVICE);

		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}

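/*
 * Illustrative arithmetic (not in the original source): each LCLA link slot
 * holds a src/dst pair of struct d40_log_lli (2 * 8 bytes), so the offset
 * above is phy_num * 1024 + 16 * curr_lcla.  With 64 usable src/dst link
 * slots per event group that is exactly 1024 bytes of LCLA space per
 * physical channel.
 */
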
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}

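/*
 * Illustrative example (not in the original source): apart from the special
 * "burst of 1" encodings checked above, the register encoding is
 * exponential, so a psize value of 2 corresponds to a burst size of
 * 2 << 2 = 8 elements.
 */
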
/*
 * The dma only supports transmitting packets up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

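/*
 * Worked example (illustrative, not in the original source): with equal
 * src and dst widths the rounding above is a no-op, so the function simply
 * returns DIV_ROUND_UP(size, seg_max), i.e. a size of 2.5 * seg_max needs
 * three links.  With differing widths, seg_max is additionally trimmed so
 * every segment stays aligned to the wider element size.
 */
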
static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

/* Support functions for logical channels */

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				"unable to suspend the chl %d (log: %d) status %x\n",
				d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

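/*
 * Illustrative note (not in the original source): the suspend poll above is
 * bounded by D40_SUSPEND_MAX_IT (500) iterations with a udelay(3) in each,
 * so a channel gets roughly 1.5 ms to reach SUSPENDED before -EBUSY is
 * returned.
 */
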
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
				   u32 event, int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;

	if (!enable) {
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		return;
	}

	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel.  Retry to ensure
	 * it does.  Usually only one retry is sufficient.
	 */
	tries = 100;
	while (--tries) {
		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		if (readl(addr) & D40_EVENTLINE_MASK(event))
			break;
	}

	if (tries != 99)
		dev_dbg(chan2dev(d40c),
			"[%s] workaround enable S%cLNK (%d tries)\n",
			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
			100 - tries);

	WARN_ON(!tries);
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
	}
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

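/*
 * Illustrative example (not in the original source): the residue is the
 * remaining element count scaled by the element size implied by data_width,
 * i.e. 1 << data_width bytes.  512 outstanding elements at a data_width
 * encoding of 2 (4-byte elements) thus report 2048 bytes left.
 */
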
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (chan_is_logical(d40c)) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (chan_is_logical(d40c)) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

		if (chan_is_logical(d40c))
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_terminate_all(struct d40_chan *chan)
{
	unsigned long flags;
	int ret = 0;

	ret = d40_pause(chan);
	if (!ret && chan_is_physical(chan))
		ret = d40_channel_execute_command(chan, D40_DMA_STOP);

	spin_lock_irqsave(&chan->lock, flags);
	d40_term_all(chan);
	spin_unlock_irqrestore(&chan->lock, flags);

	return ret;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

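/*
 * Illustrative note (not in the original source): the cookie is a plain
 * increasing int; on signed wrap-around it is reset to 1, since zero and
 * negative values are reserved in the dmaengine cookie convention for
 * "unset" and error states.
 */
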
static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (chan_is_logical(d40c)) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (chan_is_logical(d40c))
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload it,
		 * and only when the list is completed.  We need to check for
		 * done because the interrupt will hit for every link, and not
		 * just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);
	if (d40d == NULL)
		goto err;

	if (!d40d->cyclic)
		d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the
	 * client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else {
			if (!d40d->is_in_client_list) {
				d40_desc_remove(d40d);
				d40_lcla_free_all(d40c, d40d);
				list_add_tail(&d40d->node, &d40c->client);
				d40d->is_in_client_list = true;
			}
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

 err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

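/*
 * Illustrative example (not in the original source): with 32-bit longs the
 * il[] rows above carve the interrupt bit space into 32-bit windows.  A set
 * bit at row 1, bit index 5 therefore resolves to lookup_log_chans[32 + 5],
 * i.e. logical channel 37, while the two D40_PHY_CHAN rows route through
 * lookup_phy_chans[] instead.
 */
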
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		chan_err(d40c, "Invalid dst\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		chan_err(d40c, "Invalid src\n");
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		chan_err(d40c, "No event line\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		chan_err(d40c, "Invalid event group\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */

		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}

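/*
 * Worked example (illustrative, not in the original source) of the final
 * check above: a source configured for bursts of 8 one-byte elements moves
 * 8 bytes per burst, which matches a destination doing bursts of 2
 * four-byte elements; src bursts of 4 x 1 byte against dst bursts of
 * 4 x 4 bytes would be rejected.
 */
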
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
			       int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical
		 * channels rather than packing every logical channel onto the
		 * first available one.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		chan_err(d40c, "suspend failed\n");
		return res;
	}

	if (chan_is_logical(d40c)) {
		/* Release logical channel, deactivate the event line */
		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					chan_err(d40c,
						"Executing RUN command\n");
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "Failed to stop channel\n");
		return res;
	}
	d40c->phy_chan = NULL;
	d40c->configured = false;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}

static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	unsigned long flags = 0;
	int ret;

	if (desc->cyclic)
		flags |= LLI_CYCLIC | LLI_TERM_INT;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info, flags);

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info, flags);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}

static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}

static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_FROM_DEVICE)
		addr = plat->dev_rx[cfg->src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		addr = plat->dev_tx[cfg->dst_dev_type];

	return addr;
}

1874static struct dma_async_tx_descriptor *
1875d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1876	    struct scatterlist *sg_dst, unsigned int sg_len,
1877	    enum dma_data_direction direction, unsigned long dma_flags)
1878{
1879	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
1880	dma_addr_t src_dev_addr = 0;
1881	dma_addr_t dst_dev_addr = 0;
1882	struct d40_desc *desc;
1883	unsigned long flags;
1884	int ret;
1885
1886	if (!chan->phy_chan) {
1887		chan_err(chan, "Cannot prepare unallocated channel\n");
1888		return NULL;
1889	}
1890
1891
1892	spin_lock_irqsave(&chan->lock, flags);
1893
1894	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
1895	if (desc == NULL)
1896		goto err;
1897
1898	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
1899		desc->cyclic = true;
1900
1901	if (direction != DMA_NONE) {
1902		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
1903
1904		if (direction == DMA_FROM_DEVICE)
1905			src_dev_addr = dev_addr;
1906		else if (direction == DMA_TO_DEVICE)
1907			dst_dev_addr = dev_addr;
1908	}
1909
1910	if (chan_is_logical(chan))
1911		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
1912				      sg_len, src_dev_addr, dst_dev_addr);
1913	else
1914		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
1915				      sg_len, src_dev_addr, dst_dev_addr);
1916
1917	if (ret) {
1918		chan_err(chan, "Failed to prepare %s sg job: %d\n",
1919			 chan_is_logical(chan) ? "log" : "phy", ret);
1920		goto err;
1921	}
1922
1923	/*
1924	 * Add the descriptor to the prepare queue so that it can
1925	 * be freed later in terminate_all().
1926	 */
1927	list_add_tail(&desc->node, &chan->prepare_queue);
1928
1929	spin_unlock_irqrestore(&chan->lock, flags);
1930
1931	return &desc->txd;
1932
1933err:
1934	if (desc)
1935		d40_desc_free(chan, desc);
1936	spin_unlock_irqrestore(&chan->lock, flags);
1937	return NULL;
1938}
1939
1940bool stedma40_filter(struct dma_chan *chan, void *data)
1941{
1942	struct stedma40_chan_cfg *info = data;
1943	struct d40_chan *d40c =
1944		container_of(chan, struct d40_chan, chan);
1945	int err;
1946
1947	if (data) {
1948		err = d40_validate_conf(d40c, info);
1949		if (!err)
1950			d40c->dma_cfg = *info;
1951	} else
1952		err = d40_config_memcpy(d40c);
1953
1954	if (!err)
1955		d40c->configured = true;
1956
1957	return err == 0;
1958}
1959EXPORT_SYMBOL(stedma40_filter);
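
/*
 * A minimal client-side sketch (not part of this driver): the filter
 * above is meant to be handed to dma_request_channel() together with
 * a channel configuration.  The cfg fields below are illustrative
 * placeholders, not a known-good platform configuration.
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.src_dev_type = <platform event line>,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */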
1960
1961static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
1962{
1963	bool realtime = d40c->dma_cfg.realtime;
1964	bool highprio = d40c->dma_cfg.high_priority;
1965	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
1966	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
1967	u32 event = D40_TYPE_TO_EVENT(dev_type);
1968	u32 group = D40_TYPE_TO_GROUP(dev_type);
1969	u32 bit = 1 << event;
1970
1971	/* Destination event lines are stored in the upper halfword */
1972	if (!src)
1973		bit <<= 16;
1974
1975	writel(bit, d40c->base->virtbase + prioreg + group * 4);
1976	writel(bit, d40c->base->virtbase + rtreg + group * 4);
1977}
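
/*
 * Layout sketch for the helper above (derived from its register
 * arithmetic, not from the datasheet): D40_TYPE_TO_GROUP() and
 * D40_TYPE_TO_EVENT() split the device type into a register-group
 * index (each group is one 32-bit register, hence the "group * 4"
 * byte offset) and an event-line bit inside that register; source
 * event lines sit in the lower halfword, destination ones in the
 * upper halfword.
 */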
1978
1979static void d40_set_prio_realtime(struct d40_chan *d40c)
1980{
1981	if (d40c->base->rev < 3)
1982		return;
1983
1984	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
1985	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1986		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
1987
1988	if ((d40c->dma_cfg.dir ==  STEDMA40_MEM_TO_PERIPH) ||
1989	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1990		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
1991}
1992
1993/* DMA ENGINE functions */
1994static int d40_alloc_chan_resources(struct dma_chan *chan)
1995{
1996	int err;
1997	unsigned long flags;
1998	struct d40_chan *d40c =
1999		container_of(chan, struct d40_chan, chan);
2000	bool is_free_phy;

2001	spin_lock_irqsave(&d40c->lock, flags);
2002
2003	d40c->completed = chan->cookie = 1;
2004
2005	/* If no dma configuration is set use default configuration (memcpy) */
2006	if (!d40c->configured) {
2007		err = d40_config_memcpy(d40c);
2008		if (err) {
2009			chan_err(d40c, "Failed to configure memcpy channel\n");
2010			goto fail;
2011		}
2012	}
2013	is_free_phy = (d40c->phy_chan == NULL);
2014
2015	err = d40_allocate_channel(d40c);
2016	if (err) {
2017		chan_err(d40c, "Failed to allocate channel\n");
2018		goto fail;
2019	}
2020
2021	/* Fill in basic CFG register values */
2022	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
2023		    &d40c->dst_def_cfg, chan_is_logical(d40c));
2024
2025	d40_set_prio_realtime(d40c);
2026
2027	if (chan_is_logical(d40c)) {
2028		d40_log_cfg(&d40c->dma_cfg,
2029			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2030
2031		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
2032			d40c->lcpa = d40c->base->lcpa_base +
2033			  d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
2034		else
2035			d40c->lcpa = d40c->base->lcpa_base +
2036			  d40c->dma_cfg.dst_dev_type *
2037			  D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2038	}
2039
2040	/*
2041	 * Only write channel configuration to the DMA if the physical
2042	 * resource is free. In case of multiple logical channels
2043	 * on the same physical resource, only the first write is necessary.
2044	 */
2045	if (is_free_phy)
2046		d40_config_write(d40c);
2047fail:
2048	spin_unlock_irqrestore(&d40c->lock, flags);
2049	return err;
2050}
2051
2052static void d40_free_chan_resources(struct dma_chan *chan)
2053{
2054	struct d40_chan *d40c =
2055		container_of(chan, struct d40_chan, chan);
2056	int err;
2057	unsigned long flags;
2058
2059	if (d40c->phy_chan == NULL) {
2060		chan_err(d40c, "Cannot free unallocated channel\n");
2061		return;
2062	}
2063
2065	spin_lock_irqsave(&d40c->lock, flags);
2066
2067	err = d40_free_dma(d40c);
2068
2069	if (err)
2070		chan_err(d40c, "Failed to free channel\n");
2071	spin_unlock_irqrestore(&d40c->lock, flags);
2072}
2073
2074static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2075						       dma_addr_t dst,
2076						       dma_addr_t src,
2077						       size_t size,
2078						       unsigned long dma_flags)
2079{
2080	struct scatterlist dst_sg;
2081	struct scatterlist src_sg;
2082
2083	sg_init_table(&dst_sg, 1);
2084	sg_init_table(&src_sg, 1);
2085
2086	sg_dma_address(&dst_sg) = dst;
2087	sg_dma_address(&src_sg) = src;
2088
2089	sg_dma_len(&dst_sg) = size;
2090	sg_dma_len(&src_sg) = size;
2091
2092	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
2093}
2094
2095static struct dma_async_tx_descriptor *
2096d40_prep_memcpy_sg(struct dma_chan *chan,
2097		   struct scatterlist *dst_sg, unsigned int dst_nents,
2098		   struct scatterlist *src_sg, unsigned int src_nents,
2099		   unsigned long dma_flags)
2100{
2101	if (dst_nents != src_nents)
2102		return NULL;
2103
2104	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2105}
2106
2107static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2108							 struct scatterlist *sgl,
2109							 unsigned int sg_len,
2110							 enum dma_data_direction direction,
2111							 unsigned long dma_flags)
2112{
2113	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
2114		return NULL;
2115
2116	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2117}
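
/*
 * Client-side sketch for the slave path above (hedged; helper names
 * as in the dmaengine API of this era).  Note that the same sgl is
 * passed as both src and dst; d40_prep_sg() fills in the device side
 * from the channel's runtime or platform-provided address.
 *
 *	nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
 *	txd = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *						 DMA_TO_DEVICE,
 *						 DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */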
2118
2119static struct dma_async_tx_descriptor *
2120dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2121		     size_t buf_len, size_t period_len,
2122		     enum dma_data_direction direction)
2123{
2124	unsigned int periods = buf_len / period_len;
2125	struct dma_async_tx_descriptor *txd;
2126	struct scatterlist *sg;
2127	int i;
2128
2129	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
	if (!sg)
		return NULL;

2130	for (i = 0; i < periods; i++) {
2131		sg_dma_address(&sg[i]) = dma_addr;
2132		sg_dma_len(&sg[i]) = period_len;
2133		dma_addr += period_len;
2134	}
2135
2136	sg[periods].offset = 0;
2137	sg[periods].length = 0;
2138	sg[periods].page_link =
2139		((unsigned long)sg | 0x01) & ~0x02;
2140
2141	txd = d40_prep_sg(chan, sg, sg, periods, direction,
2142			  DMA_PREP_INTERRUPT);
2143
2144	kfree(sg);
2145
2146	return txd;
2147}
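
/*
 * Note on the scatterlist built above: the extra (periods + 1)-th
 * entry is turned into a chain sentinel.  Setting bit 0 of page_link
 * marks it as a chain link and clearing bit 1 removes the end marker,
 * so sg_next() of the last period wraps back to the first entry.
 * d40_prep_sg() detects exactly this wrap-around when it flags the
 * descriptor as cyclic.
 */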
2148
2149static enum dma_status d40_tx_status(struct dma_chan *chan,
2150				     dma_cookie_t cookie,
2151				     struct dma_tx_state *txstate)
2152{
2153	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2154	dma_cookie_t last_used;
2155	dma_cookie_t last_complete;
2156	int ret;
2157
2158	if (d40c->phy_chan == NULL) {
2159		chan_err(d40c, "Cannot read status of unallocated channel\n");
2160		return -EINVAL;
2161	}
2162
2163	last_complete = d40c->completed;
2164	last_used = chan->cookie;
2165
2166	if (d40_is_paused(d40c))
2167		ret = DMA_PAUSED;
2168	else
2169		ret = dma_async_is_complete(cookie, last_complete, last_used);
2170
2171	dma_set_tx_state(txstate, last_complete, last_used,
2172			 stedma40_residue(chan));
2173
2174	return ret;
2175}
2176
2177static void d40_issue_pending(struct dma_chan *chan)
2178{
2179	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2180	unsigned long flags;
2181
2182	if (d40c->phy_chan == NULL) {
2183		chan_err(d40c, "Channel is not allocated!\n");
2184		return;
2185	}
2186
2187	spin_lock_irqsave(&d40c->lock, flags);
2188
2189	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2190
2191	/* Busy means that queued jobs are already being processed */
2192	if (!d40c->busy)
2193		(void) d40_queue_start(d40c);
2194
2195	spin_unlock_irqrestore(&d40c->lock, flags);
2196}
2197
2198static int
2199dma40_config_to_halfchannel(struct d40_chan *d40c,
2200			    struct stedma40_half_channel_info *info,
2201			    enum dma_slave_buswidth width,
2202			    u32 maxburst)
2203{
2204	enum stedma40_periph_data_width addr_width;
2205	int psize;
2206
2207	switch (width) {
2208	case DMA_SLAVE_BUSWIDTH_1_BYTE:
2209		addr_width = STEDMA40_BYTE_WIDTH;
2210		break;
2211	case DMA_SLAVE_BUSWIDTH_2_BYTES:
2212		addr_width = STEDMA40_HALFWORD_WIDTH;
2213		break;
2214	case DMA_SLAVE_BUSWIDTH_4_BYTES:
2215		addr_width = STEDMA40_WORD_WIDTH;
2216		break;
2217	case DMA_SLAVE_BUSWIDTH_8_BYTES:
2218		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2219		break;
2220	default:
2221		dev_err(d40c->base->dev,
2222			"illegal peripheral address width "
2223			"requested (%d)\n",
2224			width);
2225		return -EINVAL;
2226	}
2227
2228	if (chan_is_logical(d40c)) {
2229		if (maxburst >= 16)
2230			psize = STEDMA40_PSIZE_LOG_16;
2231		else if (maxburst >= 8)
2232			psize = STEDMA40_PSIZE_LOG_8;
2233		else if (maxburst >= 4)
2234			psize = STEDMA40_PSIZE_LOG_4;
2235		else
2236			psize = STEDMA40_PSIZE_LOG_1;
2237	} else {
2238		if (maxburst >= 16)
2239			psize = STEDMA40_PSIZE_PHY_16;
2240		else if (maxburst >= 8)
2241			psize = STEDMA40_PSIZE_PHY_8;
2242		else if (maxburst >= 4)
2243			psize = STEDMA40_PSIZE_PHY_4;
2244		else
2245			psize = STEDMA40_PSIZE_PHY_1;
2246	}
2247
2248	info->data_width = addr_width;
2249	info->psize = psize;
2250	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2251
2252	return 0;
2253}
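
/*
 * Worked example of the mapping above (a sketch): a logical channel
 * configured with DMA_SLAVE_BUSWIDTH_4_BYTES and maxburst = 12 gets
 * addr_width = STEDMA40_WORD_WIDTH and psize = STEDMA40_PSIZE_LOG_8,
 * since 12 falls between the 8- and 16-element burst thresholds.
 */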
2254
2255/* Runtime reconfiguration extension */
2256static int d40_set_runtime_config(struct dma_chan *chan,
2257				  struct dma_slave_config *config)
2258{
2259	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2260	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2261	enum dma_slave_buswidth src_addr_width, dst_addr_width;
2262	dma_addr_t config_addr;
2263	u32 src_maxburst, dst_maxburst;
2264	int ret;
2265
2266	src_addr_width = config->src_addr_width;
2267	src_maxburst = config->src_maxburst;
2268	dst_addr_width = config->dst_addr_width;
2269	dst_maxburst = config->dst_maxburst;
2270
2271	if (config->direction == DMA_FROM_DEVICE) {
2272		dma_addr_t dev_addr_rx =
2273			d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2274
2275		config_addr = config->src_addr;
2276		if (dev_addr_rx)
2277			dev_dbg(d40c->base->dev,
2278				"channel has a pre-wired RX address %08x "
2279				"overriding with %08x\n",
2280				dev_addr_rx, config_addr);
2281		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2282			dev_dbg(d40c->base->dev,
2283				"channel was not configured for peripheral "
2284				"to memory transfer (%d) overriding\n",
2285				cfg->dir);
2286		cfg->dir = STEDMA40_PERIPH_TO_MEM;
2287
2288		/* Configure the memory side */
2289		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2290			dst_addr_width = src_addr_width;
2291		if (dst_maxburst == 0)
2292			dst_maxburst = src_maxburst;
2293
2294	} else if (config->direction == DMA_TO_DEVICE) {
2295		dma_addr_t dev_addr_tx =
2296			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2297
2298		config_addr = config->dst_addr;
2299		if (dev_addr_tx)
2300			dev_dbg(d40c->base->dev,
2301				"channel has a pre-wired TX address %08x "
2302				"overriding with %08x\n",
2303				dev_addr_tx, config_addr);
2304		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2305			dev_dbg(d40c->base->dev,
2306				"channel was not configured for memory "
2307				"to peripheral transfer (%d) overriding\n",
2308				cfg->dir);
2309		cfg->dir = STEDMA40_MEM_TO_PERIPH;
2310
2311		/* Configure the memory side */
2312		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2313			src_addr_width = dst_addr_width;
2314		if (src_maxburst == 0)
2315			src_maxburst = dst_maxburst;
2316	} else {
2317		dev_err(d40c->base->dev,
2318			"unrecognized channel direction %d\n",
2319			config->direction);
2320		return -EINVAL;
2321	}
2322
2323	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2324		dev_err(d40c->base->dev,
2325			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2326			src_maxburst,
2327			src_addr_width,
2328			dst_maxburst,
2329			dst_addr_width);
2330		return -EINVAL;
2331	}
2332
2333	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2334					  src_addr_width,
2335					  src_maxburst);
2336	if (ret)
2337		return ret;
2338
2339	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2340					  dst_addr_width,
2341					  dst_maxburst);
2342	if (ret)
2343		return ret;
2344
2345	/* Fill in register values */
2346	if (chan_is_logical(d40c))
2347		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2348	else
2349		d40_phy_cfg(cfg, &d40c->src_def_cfg,
2350			    &d40c->dst_def_cfg, false);
2351
2352	/* These settings will take precedence later */
2353	d40c->runtime_addr = config_addr;
2354	d40c->runtime_direction = config->direction;
2355	dev_dbg(d40c->base->dev,
2356		"configured channel %s for %s, data width %d/%d, "
2357		"maxburst %d/%d elements, LE, no flow control\n",
2358		dma_chan_name(chan),
2359		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2360		src_addr_width, dst_addr_width,
2361		src_maxburst, dst_maxburst);
2362
2363	return 0;
2364}
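
/*
 * Client-side sketch (assuming the generic dmaengine wrappers of this
 * kernel generation): runtime reconfiguration reaches the function
 * above through the DMA_SLAVE_CONFIG control command, e.g.
 *
 *	struct dma_slave_config config = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = <peripheral FIFO address>,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &config);
 */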
2365
2366static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2367		       unsigned long arg)
2368{
2369	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2370
2371	if (d40c->phy_chan == NULL) {
2372		chan_err(d40c, "Channel is not allocated!\n");
2373		return -EINVAL;
2374	}
2375
2376	switch (cmd) {
2377	case DMA_TERMINATE_ALL:
2378		return d40_terminate_all(d40c);
2379	case DMA_PAUSE:
2380		return d40_pause(d40c);
2381	case DMA_RESUME:
2382		return d40_resume(d40c);
2383	case DMA_SLAVE_CONFIG:
2384		return d40_set_runtime_config(chan,
2385			(struct dma_slave_config *) arg);
2386	default:
2387		break;
2388	}
2389
2390	/* Other commands are unimplemented */
2391	return -ENXIO;
2392}
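
/*
 * Dispatch sketch (assuming the generic dmaengine wrappers): the
 * cases above are typically reached via
 *
 *	dmaengine_pause(chan);		   DMA_PAUSE         -> d40_pause()
 *	dmaengine_resume(chan);		   DMA_RESUME        -> d40_resume()
 *	dmaengine_terminate_all(chan);	   DMA_TERMINATE_ALL -> d40_terminate_all()
 */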
2393
2394/* Initialization functions */
2395
2396static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2397				 struct d40_chan *chans, int offset,
2398				 int num_chans)
2399{
2400	int i = 0;
2401	struct d40_chan *d40c;
2402
2403	INIT_LIST_HEAD(&dma->channels);
2404
2405	for (i = offset; i < offset + num_chans; i++) {
2406		d40c = &chans[i];
2407		d40c->base = base;
2408		d40c->chan.device = dma;
2409
2410		spin_lock_init(&d40c->lock);
2411
2412		d40c->log_num = D40_PHY_CHAN;
2413
2414		INIT_LIST_HEAD(&d40c->active);
2415		INIT_LIST_HEAD(&d40c->queue);
2416		INIT_LIST_HEAD(&d40c->pending_queue);
2417		INIT_LIST_HEAD(&d40c->client);
2418		INIT_LIST_HEAD(&d40c->prepare_queue);
2419
2420		tasklet_init(&d40c->tasklet, dma_tasklet,
2421			     (unsigned long) d40c);
2422
2423		list_add_tail(&d40c->chan.device_node,
2424			      &dma->channels);
2425	}
2426}
2427
2428static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2429{
2430	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2431		dev->device_prep_slave_sg = d40_prep_slave_sg;
2432
2433	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2434		dev->device_prep_dma_memcpy = d40_prep_memcpy;
2435
2436		/*
2437		 * This controller can only access addresses at even
2438		 * 32-bit boundaries, i.e. 2^2.
2439		 */
2440		dev->copy_align = 2;
2441	}
2442
2443	if (dma_has_cap(DMA_SG, dev->cap_mask))
2444		dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2445
2446	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2447		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2448
2449	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2450	dev->device_free_chan_resources = d40_free_chan_resources;
2451	dev->device_issue_pending = d40_issue_pending;
2452	dev->device_tx_status = d40_tx_status;
2453	dev->device_control = d40_control;
2454	dev->dev = base->dev;
2455}
2456
2457static int __init d40_dmaengine_init(struct d40_base *base,
2458				     int num_reserved_chans)
2459{
2460	int err;
2461
2462	d40_chan_init(base, &base->dma_slave, base->log_chans,
2463		      0, base->num_log_chans);
2464
2465	dma_cap_zero(base->dma_slave.cap_mask);
2466	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2467	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2468
2469	d40_ops_init(base, &base->dma_slave);
2470
2471	err = dma_async_device_register(&base->dma_slave);
2472
2473	if (err) {
2474		d40_err(base->dev, "Failed to register slave channels\n");
2475		goto failure1;
2476	}
2477
2478	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2479		      base->num_log_chans, base->plat_data->memcpy_len);
2480
2481	dma_cap_zero(base->dma_memcpy.cap_mask);
2482	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2483	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2484
2485	d40_ops_init(base, &base->dma_memcpy);
2486
2487	err = dma_async_device_register(&base->dma_memcpy);
2488
2489	if (err) {
2490		d40_err(base->dev,
2491			"Failed to register memcpy only channels\n");
2492		goto failure2;
2493	}
2494
2495	d40_chan_init(base, &base->dma_both, base->phy_chans,
2496		      0, num_reserved_chans);
2497
2498	dma_cap_zero(base->dma_both.cap_mask);
2499	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2500	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2501	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2502	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2503
2504	d40_ops_init(base, &base->dma_both);
2505	err = dma_async_device_register(&base->dma_both);
2506
2507	if (err) {
2508		d40_err(base->dev,
2509			"Failed to register logical and physical capable channels\n");
2510		goto failure3;
2511	}
2512	return 0;
2513failure3:
2514	dma_async_device_unregister(&base->dma_memcpy);
2515failure2:
2516	dma_async_device_unregister(&base->dma_slave);
2517failure1:
2518	return err;
2519}
2520
2521/* Initialization functions. */
2522
2523static int __init d40_phy_res_init(struct d40_base *base)
2524{
2525	int i;
2526	int num_phy_chans_avail = 0;
2527	u32 val[2];
2528	int odd_even_bit = -2;
2529
2530	val[0] = readl(base->virtbase + D40_DREG_PRSME);
2531	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2532
2533	for (i = 0; i < base->num_phy_chans; i++) {
2534		base->phy_res[i].num = i;
2535		odd_even_bit += 2 * ((i % 2) == 0);
2536		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2537			/* Mark security only channels as occupied */
2538			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2539			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2540		} else {
2541			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2542			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2543			num_phy_chans_avail++;
2544		}
2545		spin_lock_init(&base->phy_res[i].lock);
2546	}
2547
2548	/* Mark disabled channels as occupied */
2549	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2550		int chan = base->plat_data->disabled_channels[i];
2551
2552		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2553		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2554		num_phy_chans_avail--;
2555	}
2556
2557	dev_info(base->dev, "%d of %d physical DMA channels available\n",
2558		 num_phy_chans_avail, base->num_phy_chans);
2559
2560	/* Verify settings extended vs standard */
2561	val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2562
2563	for (i = 0; i < base->num_phy_chans; i++) {
2564
2565		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2566		    (val[0] & 0x3) != 1)
2567			dev_info(base->dev,
2568				 "[%s] INFO: channel %d is misconfigured (%d)\n",
2569				 __func__, i, val[0] & 0x3);
2570
2571		val[0] = val[0] >> 2;
2572	}
2573
2574	return num_phy_chans_avail;
2575}
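
/*
 * Decoding sketch for the security loop above: PRSME carries the
 * 2-bit mode fields for the even channels and PRSMO for the odd
 * ones.  For channel 5, for instance, val[1] (PRSMO) is consulted
 * with odd_even_bit advanced to 4, so bits 5:4 hold the mode; the
 * value 1 means "secure use only" and the channel is marked as
 * occupied.
 */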
2576
2577static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2578{
2579	struct stedma40_platform_data *plat_data;
2580	struct clk *clk = NULL;
2581	void __iomem *virtbase = NULL;
2582	struct resource *res = NULL;
2583	struct d40_base *base = NULL;
2584	int num_log_chans = 0;
2585	int num_phy_chans;
2586	int i;
2587	u32 pid;
2588	u32 cid;
2589	u8 rev;
2590
2591	clk = clk_get(&pdev->dev, NULL);
2592
2593	if (IS_ERR(clk)) {
2594		d40_err(&pdev->dev, "No matching clock found\n");
2595		goto failure;
2596	}
2597
2598	clk_enable(clk);
2599
2600	/* Get IO for DMAC base address */
2601	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2602	if (!res)
2603		goto failure;
2604
2605	if (request_mem_region(res->start, resource_size(res),
2606			       D40_NAME " I/O base") == NULL)
2607		goto failure;
2608
2609	virtbase = ioremap(res->start, resource_size(res));
2610	if (!virtbase)
2611		goto failure;
2612
2613	/* This is just a regular AMBA PrimeCell ID actually */
2614	for (pid = 0, i = 0; i < 4; i++)
2615		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
2616			& 255) << (i * 8);
2617	for (cid = 0, i = 0; i < 4; i++)
2618		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
2619			& 255) << (i * 8);
2620
2621	if (cid != AMBA_CID) {
2622		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
2623		goto failure;
2624	}
2625	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
2626		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2627			AMBA_MANF_BITS(pid),
2628			AMBA_VENDOR_ST);
2629		goto failure;
2630	}
2631	/*
2632	 * HW revision:
2633	 * DB8500ed has revision 0
2634	 * ? has revision 1
2635	 * DB8500v1 has revision 2
2636	 * DB8500v2 has revision 3
2637	 */
2638	rev = AMBA_REV_BITS(pid);
2639
2640	/* The number of physical channels on this HW */
2641	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2642
2643	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2644		 rev, res->start);
2645
2646	plat_data = pdev->dev.platform_data;
2647
2648	/* Count the number of logical channels in use */
2649	for (i = 0; i < plat_data->dev_len; i++)
2650		if (plat_data->dev_rx[i] != 0)
2651			num_log_chans++;
2652
2653	for (i = 0; i < plat_data->dev_len; i++)
2654		if (plat_data->dev_tx[i] != 0)
2655			num_log_chans++;
2656
2657	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2658		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2659		       sizeof(struct d40_chan), GFP_KERNEL);
2660
2661	if (base == NULL) {
2662		d40_err(&pdev->dev, "Out of memory\n");
2663		goto failure;
2664	}
2665
2666	base->rev = rev;
2667	base->clk = clk;
2668	base->num_phy_chans = num_phy_chans;
2669	base->num_log_chans = num_log_chans;
2670	base->phy_start = res->start;
2671	base->phy_size = resource_size(res);
2672	base->virtbase = virtbase;
2673	base->plat_data = plat_data;
2674	base->dev = &pdev->dev;
2675	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2676	base->log_chans = &base->phy_chans[num_phy_chans];
2677
2678	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2679				GFP_KERNEL);
2680	if (!base->phy_res)
2681		goto failure;
2682
2683	base->lookup_phy_chans = kzalloc(num_phy_chans *
2684					 sizeof(struct d40_chan *),
2685					 GFP_KERNEL);
2686	if (!base->lookup_phy_chans)
2687		goto failure;
2688
2689	if (num_log_chans + plat_data->memcpy_len) {
2690		/*
2691		 * The max number of logical channels is the number of
2692		 * event lines for all src and dst devices.
2693		 */
2694		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2695						 sizeof(struct d40_chan *),
2696						 GFP_KERNEL);
2697		if (!base->lookup_log_chans)
2698			goto failure;
2699	}
2700
2701	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
2702					    sizeof(struct d40_desc *) *
2703					    D40_LCLA_LINK_PER_EVENT_GRP,
2704					    GFP_KERNEL);
2705	if (!base->lcla_pool.alloc_map)
2706		goto failure;
2707
2708	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2709					    0, SLAB_HWCACHE_ALIGN,
2710					    NULL);
2711	if (base->desc_slab == NULL)
2712		goto failure;
2713
2714	return base;
2715
2716failure:
2717	if (!IS_ERR(clk)) {
2718		clk_disable(clk);
2719		clk_put(clk);
2720	}
2721	if (virtbase)
2722		iounmap(virtbase);
2723	if (res)
2724		release_mem_region(res->start,
2725				   resource_size(res));
2728
2729	if (base) {
2730		kfree(base->lcla_pool.alloc_map);
2731		kfree(base->lookup_log_chans);
2732		kfree(base->lookup_phy_chans);
2733		kfree(base->phy_res);
2734		kfree(base);
2735	}
2736
2737	return NULL;
2738}
2739
2740static void __init d40_hw_init(struct d40_base *base)
2741{
2742
2743	static const struct d40_reg_val dma_init_reg[] = {
2744		/* Clock every part of the DMA block from start */
2745		{ .reg = D40_DREG_GCC,    .val = 0x0000ff01},
2746
2747		/* Interrupts on all logical channels */
2748		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2749		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2750		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2751		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2752		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2753		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2754		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2755		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2756		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2757		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2758		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2759		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2760	};
2761	int i;
2762	u32 prmseo[2] = {0, 0};
2763	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2764	u32 pcmis = 0;
2765	u32 pcicr = 0;
2766
2767	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2768		writel(dma_init_reg[i].val,
2769		       base->virtbase + dma_init_reg[i].reg);
2770
2771	/* Configure all our dma channels to default settings */
2772	for (i = 0; i < base->num_phy_chans; i++) {
2773
2774		activeo[i % 2] = activeo[i % 2] << 2;
2775
2776		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2777		    == D40_ALLOC_PHY) {
2778			activeo[i % 2] |= 3;
2779			continue;
2780		}
2781
2782		/* Enable interrupt # */
2783		pcmis = (pcmis << 1) | 1;
2784
2785		/* Clear interrupt # */
2786		pcicr = (pcicr << 1) | 1;
2787
2788		/* Set channel to physical mode */
2789		prmseo[i % 2] = prmseo[i % 2] << 2;
2790		prmseo[i % 2] |= 1;
2791
2792	}
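
	/*
	 * Packing sketch for the loop above: PRMSE/PRMSO and
	 * ACTIVE/ACTIVO hold two bits per channel, split across an
	 * "even" and an "odd" register, which is why the loop walks
	 * the channels from the highest number down while shifting
	 * the accumulated values left by two on every step.
	 */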
2793
2794	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2795	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2796	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2797	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2798
2799	/* Write which interrupt to enable */
2800	writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2801
2802	/* Write which interrupt to clear */
2803	writel(pcicr, base->virtbase + D40_DREG_PCICR);
2804
2805}
2806
2807static int __init d40_lcla_allocate(struct d40_base *base)
2808{
2809	struct d40_lcla_pool *pool = &base->lcla_pool;
2810	unsigned long *page_list;
2811	int i, j;
2812	int ret = 0;
2813
2814	/*
2815	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
2816	 * To fulfil this hardware requirement without wasting 256 kb,
2817	 * we allocate pages until we get an aligned one.
2818	 */
2819	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2820			    GFP_KERNEL);
2821
2822	if (!page_list) {
2823		ret = -ENOMEM;
2824		goto failure;
2825	}
2826
2827	/* Calculate how many pages are required */
2828	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2829
2830	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2831		page_list[i] = __get_free_pages(GFP_KERNEL,
2832						base->lcla_pool.pages);
2833		if (!page_list[i]) {
2834
2835			d40_err(base->dev, "Failed to allocate %d pages.\n",
2836				base->lcla_pool.pages);
2837
2838			for (j = 0; j < i; j++)
2839				free_pages(page_list[j], base->lcla_pool.pages);
2840			goto failure;
2841		}
2842
2843		if ((virt_to_phys((void *)page_list[i]) &
2844		     (LCLA_ALIGNMENT - 1)) == 0)
2845			break;
2846	}
2847
2848	for (j = 0; j < i; j++)
2849		free_pages(page_list[j], base->lcla_pool.pages);
2850
2851	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2852		base->lcla_pool.base = (void *)page_list[i];
2853	} else {
2854		/*
2855		 * After many attempts with no success finding the correct
2856		 * alignment, try allocating a big buffer instead.
2857		 */
2858		dev_warn(base->dev,
2859			 "[%s] Failed to get %d pages @ 18 bit align.\n",
2860			 __func__, base->lcla_pool.pages);
2861		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2862							 base->num_phy_chans +
2863							 LCLA_ALIGNMENT,
2864							 GFP_KERNEL);
2865		if (!base->lcla_pool.base_unaligned) {
2866			ret = -ENOMEM;
2867			goto failure;
2868		}
2869
2870		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2871						 LCLA_ALIGNMENT);
2872	}
2873
2874	pool->dma_addr = dma_map_single(base->dev, pool->base,
2875					SZ_1K * base->num_phy_chans,
2876					DMA_TO_DEVICE);
2877	if (dma_mapping_error(base->dev, pool->dma_addr)) {
2878		pool->dma_addr = 0;
2879		ret = -ENOMEM;
2880		goto failure;
2881	}
2882
2883	writel(virt_to_phys(base->lcla_pool.base),
2884	       base->virtbase + D40_DREG_LCLA);
2885failure:
2886	kfree(page_list);
2887	return ret;
2888}
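
/*
 * Sizing note for the allocation above (a sketch, assuming 4 KiB
 * pages): the pool needs SZ_1K bytes per physical channel, so e.g.
 * 8 channels give lcla_pool.pages = 2.  That value is then passed to
 * __get_free_pages()/free_pages(), whose second argument is an
 * allocation order, and the resulting block must also start on an
 * LCLA_ALIGNMENT (0x40000, i.e. 256 KiB) boundary - hence the retry
 * loop over the page allocator.
 */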
2889
2890static int __init d40_probe(struct platform_device *pdev)
2891{
2893	int ret = -ENOENT;
2894	struct d40_base *base;
2895	struct resource *res = NULL;
2896	int num_reserved_chans;
2897	u32 val;
2898
2899	base = d40_hw_detect_init(pdev);
2900
2901	if (!base)
2902		goto failure;
2903
2904	num_reserved_chans = d40_phy_res_init(base);
2905
2906	platform_set_drvdata(pdev, base);
2907
2908	spin_lock_init(&base->interrupt_lock);
2909	spin_lock_init(&base->execmd_lock);
2910
2911	/* Get IO for logical channel parameter address */
2912	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2913	if (!res) {
2914		ret = -ENOENT;
2915		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
2916		goto failure;
2917	}
2918	base->lcpa_size = resource_size(res);
2919	base->phy_lcpa = res->start;
2920
2921	if (request_mem_region(res->start, resource_size(res),
2922			       D40_NAME " I/O lcpa") == NULL) {
2923		ret = -EBUSY;
2924		d40_err(&pdev->dev,
2925			"Failed to request LCPA region 0x%x-0x%x\n",
2926			res->start, res->end);
2927		goto failure;
2928	}
2929
2930	/* We make use of ESRAM memory for this. */
2931	val = readl(base->virtbase + D40_DREG_LCPA);
2932	if (res->start != val && val != 0) {
2933		dev_warn(&pdev->dev,
2934			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2935			 __func__, val, res->start);
2936	} else
2937		writel(res->start, base->virtbase + D40_DREG_LCPA);
2938
2939	base->lcpa_base = ioremap(res->start, resource_size(res));
2940	if (!base->lcpa_base) {
2941		ret = -ENOMEM;
2942		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
2943		goto failure;
2944	}
2945
2946	ret = d40_lcla_allocate(base);
2947	if (ret) {
2948		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
2949		goto failure;
2950	}
2951
2952	spin_lock_init(&base->lcla_pool.lock);
2953
2954	base->irq = platform_get_irq(pdev, 0);
2955
2956	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2957	if (ret) {
2958		d40_err(&pdev->dev, "No IRQ defined\n");
2959		goto failure;
2960	}
2961
2962	ret = d40_dmaengine_init(base, num_reserved_chans);
2963	if (ret)
2964		goto failure;
2965
2966	d40_hw_init(base);
2967
2968	dev_info(base->dev, "initialized\n");
2969	return 0;
2970
2971failure:
2972	if (base) {
2973		if (base->desc_slab)
2974			kmem_cache_destroy(base->desc_slab);
2975		if (base->virtbase)
2976			iounmap(base->virtbase);
2977
2978		if (base->lcla_pool.dma_addr)
2979			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
2980					 SZ_1K * base->num_phy_chans,
2981					 DMA_TO_DEVICE);
2982
2983		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2984			free_pages((unsigned long)base->lcla_pool.base,
2985				   base->lcla_pool.pages);
2986
2987		kfree(base->lcla_pool.base_unaligned);
2988
2989		if (base->phy_lcpa)
2990			release_mem_region(base->phy_lcpa,
2991					   base->lcpa_size);
2992		if (base->phy_start)
2993			release_mem_region(base->phy_start,
2994					   base->phy_size);
2995		if (base->clk) {
2996			clk_disable(base->clk);
2997			clk_put(base->clk);
2998		}
2999
3000		kfree(base->lcla_pool.alloc_map);
3001		kfree(base->lookup_log_chans);
3002		kfree(base->lookup_phy_chans);
3003		kfree(base->phy_res);
3004		kfree(base);
3005	}
3006
3007	d40_err(&pdev->dev, "probe failed\n");
3008	return ret;
3009}
3010
3011static struct platform_driver d40_driver = {
3012	.driver = {
3013		.owner = THIS_MODULE,
3014		.name  = D40_NAME,
3015	},
3016};
3017
3018static int __init stedma40_init(void)
3019{
3020	return platform_driver_probe(&d40_driver, d40_probe);
3021}
3022subsys_initcall(stedma40_init);
v4.17
   1/*
   2 * Copyright (C) Ericsson AB 2007-2008
   3 * Copyright (C) ST-Ericsson SA 2008-2010
   4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
   5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
   6 * License terms: GNU General Public License (GPL) version 2
   7 */
   8
   9#include <linux/dma-mapping.h>
  10#include <linux/kernel.h>
  11#include <linux/slab.h>
  12#include <linux/export.h>
  13#include <linux/dmaengine.h>
  14#include <linux/platform_device.h>
  15#include <linux/clk.h>
  16#include <linux/delay.h>
  17#include <linux/log2.h>
  18#include <linux/pm.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/err.h>
  21#include <linux/of.h>
  22#include <linux/of_dma.h>
  23#include <linux/amba/bus.h>
  24#include <linux/regulator/consumer.h>
  25#include <linux/platform_data/dma-ste-dma40.h>
  26
  27#include "dmaengine.h"
 
  28#include "ste_dma40_ll.h"
  29
  30#define D40_NAME "dma40"
  31
  32#define D40_PHY_CHAN -1
  33
  34/* For masking out/in 2 bit channel positions */
  35#define D40_CHAN_POS(chan)  (2 * (chan / 2))
  36#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
  37
  38/* Maximum iterations taken before giving up suspending a channel */
  39#define D40_SUSPEND_MAX_IT 500
  40
  41/* Milliseconds */
  42#define DMA40_AUTOSUSPEND_DELAY	100
  43
  44/* Hardware requirement on LCLA alignment */
  45#define LCLA_ALIGNMENT 0x40000
  46
  47/* Max number of links per event group */
  48#define D40_LCLA_LINK_PER_EVENT_GRP 128
  49#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
  50
  51/* Max number of logical channels per physical channel */
  52#define D40_MAX_LOG_CHAN_PER_PHY 32
  53
  54/* Attempts before giving up to trying to get pages that are aligned */
  55#define MAX_LCLA_ALLOC_ATTEMPTS 256
  56
  57/* Bit markings for allocation map */
  58#define D40_ALLOC_FREE		BIT(31)
  59#define D40_ALLOC_PHY		BIT(30)
  60#define D40_ALLOC_LOG_FREE	0
  61
  62#define D40_MEMCPY_MAX_CHANS	8
  63
  64/* Reserved event lines for memcpy only. */
  65#define DB8500_DMA_MEMCPY_EV_0	51
  66#define DB8500_DMA_MEMCPY_EV_1	56
  67#define DB8500_DMA_MEMCPY_EV_2	57
  68#define DB8500_DMA_MEMCPY_EV_3	58
  69#define DB8500_DMA_MEMCPY_EV_4	59
  70#define DB8500_DMA_MEMCPY_EV_5	60
  71
  72static int dma40_memcpy_channels[] = {
  73	DB8500_DMA_MEMCPY_EV_0,
  74	DB8500_DMA_MEMCPY_EV_1,
  75	DB8500_DMA_MEMCPY_EV_2,
  76	DB8500_DMA_MEMCPY_EV_3,
  77	DB8500_DMA_MEMCPY_EV_4,
  78	DB8500_DMA_MEMCPY_EV_5,
  79};
  80
  81/* Default configuration for physcial memcpy */
  82static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
  83	.mode = STEDMA40_MODE_PHYSICAL,
  84	.dir = DMA_MEM_TO_MEM,
  85
  86	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
  87	.src_info.psize = STEDMA40_PSIZE_PHY_1,
  88	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
  89
  90	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
  91	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
  92	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
  93};
  94
  95/* Default configuration for logical memcpy */
  96static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
  97	.mode = STEDMA40_MODE_LOGICAL,
  98	.dir = DMA_MEM_TO_MEM,
  99
 100	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 101	.src_info.psize = STEDMA40_PSIZE_LOG_1,
 102	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 103
 104	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 105	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
 106	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 107};
 108
 109/**
 110 * enum 40_command - The different commands and/or statuses.
 111 *
 112 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 113 * @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN.
 114 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 115 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 116 */
 117enum d40_command {
 118	D40_DMA_STOP		= 0,
 119	D40_DMA_RUN		= 1,
 120	D40_DMA_SUSPEND_REQ	= 2,
 121	D40_DMA_SUSPENDED	= 3
 122};
 123
 124/*
 125 * enum d40_events - The different Event Enables for the event lines.
 126 *
 127 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 128 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 129 * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
 130 * @D40_ROUND_EVENTLINE: Status check for event line.
 131 */
 132
 133enum d40_events {
 134	D40_DEACTIVATE_EVENTLINE	= 0,
 135	D40_ACTIVATE_EVENTLINE		= 1,
 136	D40_SUSPEND_REQ_EVENTLINE	= 2,
 137	D40_ROUND_EVENTLINE		= 3
 138};
 139
 140/*
 141 * These are the registers that has to be saved and later restored
 142 * when the DMA hw is powered off.
 143 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 144 */
 145static u32 d40_backup_regs[] = {
 146	D40_DREG_LCPA,
 147	D40_DREG_LCLA,
 148	D40_DREG_PRMSE,
 149	D40_DREG_PRMSO,
 150	D40_DREG_PRMOE,
 151	D40_DREG_PRMOO,
 152};
 153
 154#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
 155
 156/*
 157 * since 9540 and 8540 has the same HW revision
 158 * use v4a for 9540 or ealier
 159 * use v4b for 8540 or later
 160 * HW revision:
 161 * DB8500ed has revision 0
 162 * DB8500v1 has revision 2
 163 * DB8500v2 has revision 3
 164 * AP9540v1 has revision 4
 165 * DB8540v1 has revision 4
 166 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 167 */
 168static u32 d40_backup_regs_v4a[] = {
 169	D40_DREG_PSEG1,
 170	D40_DREG_PSEG2,
 171	D40_DREG_PSEG3,
 172	D40_DREG_PSEG4,
 173	D40_DREG_PCEG1,
 174	D40_DREG_PCEG2,
 175	D40_DREG_PCEG3,
 176	D40_DREG_PCEG4,
 177	D40_DREG_RSEG1,
 178	D40_DREG_RSEG2,
 179	D40_DREG_RSEG3,
 180	D40_DREG_RSEG4,
 181	D40_DREG_RCEG1,
 182	D40_DREG_RCEG2,
 183	D40_DREG_RCEG3,
 184	D40_DREG_RCEG4,
 185};
 186
 187#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
 188
 189static u32 d40_backup_regs_v4b[] = {
 190	D40_DREG_CPSEG1,
 191	D40_DREG_CPSEG2,
 192	D40_DREG_CPSEG3,
 193	D40_DREG_CPSEG4,
 194	D40_DREG_CPSEG5,
 195	D40_DREG_CPCEG1,
 196	D40_DREG_CPCEG2,
 197	D40_DREG_CPCEG3,
 198	D40_DREG_CPCEG4,
 199	D40_DREG_CPCEG5,
 200	D40_DREG_CRSEG1,
 201	D40_DREG_CRSEG2,
 202	D40_DREG_CRSEG3,
 203	D40_DREG_CRSEG4,
 204	D40_DREG_CRSEG5,
 205	D40_DREG_CRCEG1,
 206	D40_DREG_CRCEG2,
 207	D40_DREG_CRCEG3,
 208	D40_DREG_CRCEG4,
 209	D40_DREG_CRCEG5,
 210};
 211
 212#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 213
 214static u32 d40_backup_regs_chan[] = {
 215	D40_CHAN_REG_SSCFG,
 216	D40_CHAN_REG_SSELT,
 217	D40_CHAN_REG_SSPTR,
 218	D40_CHAN_REG_SSLNK,
 219	D40_CHAN_REG_SDCFG,
 220	D40_CHAN_REG_SDELT,
 221	D40_CHAN_REG_SDPTR,
 222	D40_CHAN_REG_SDLNK,
 223};
 224
 225#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
 226			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
 227
 228/**
 229 * struct d40_interrupt_lookup - lookup table for interrupt handler
 230 *
 231 * @src: Interrupt mask register.
 232 * @clr: Interrupt clear register.
 233 * @is_error: true if this is an error interrupt.
 234 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 235 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 236 */
 237struct d40_interrupt_lookup {
 238	u32 src;
 239	u32 clr;
 240	bool is_error;
 241	int offset;
 242};
 243
 244
 245static struct d40_interrupt_lookup il_v4a[] = {
 246	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
 247	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
 248	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
 249	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
 250	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
 251	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
 252	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
 253	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
 254	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
 255	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
 256};
 257
 258static struct d40_interrupt_lookup il_v4b[] = {
 259	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,  0},
 260	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
 261	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
 262	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
 263	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
 264	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,   0},
 265	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,  32},
 266	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,  64},
 267	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,  96},
 268	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
 269	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
 270	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
 271};
 272
 273/**
 274 * struct d40_reg_val - simple lookup struct
 275 *
 276 * @reg: The register.
 277 * @val: The value that belongs to the register in reg.
 278 */
 279struct d40_reg_val {
 280	unsigned int reg;
 281	unsigned int val;
 282};
 283
 284static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
 285	/* Clock every part of the DMA block from start */
 286	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
 287
 288	/* Interrupts on all logical channels */
 289	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
 290	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
 291	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
 292	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
 293	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
 294	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
 295	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
 296	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
 297	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
 298	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
 299	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
 300	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
 301};
 302static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
 303	/* Clock every part of the DMA block from start */
 304	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
 305
 306	/* Interrupts on all logical channels */
 307	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
 308	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
 309	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
 310	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
 311	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
 312	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
 313	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
 314	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
 315	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
 316	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
 317	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
 318	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
 319	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
 320	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
 321	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
 322};
 323
 324/**
 325 * struct d40_lli_pool - Structure for keeping LLIs in memory
 326 *
 327 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 328 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
 329 * pre_alloc_lli is used.
 330 * @dma_addr: DMA address, if mapped
 331 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 332 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 333 * one buffer to one buffer.
 334 */
 335struct d40_lli_pool {
 336	void	*base;
 337	int	 size;
 338	dma_addr_t	dma_addr;
 339	/* Space for dst and src, plus an extra for padding */
 340	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
 341};
 342
 343/**
 344 * struct d40_desc - A descriptor is one DMA job.
 345 *
 346 * @lli_phy: LLI settings for physical channel. Both src and dst=
 347 * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 348 * lli_len equals one.
 349 * @lli_log: Same as above but for logical channels.
 350 * @lli_pool: The pool with two entries pre-allocated.
 351 * @lli_len: Number of llis of current descriptor.
 352 * @lli_current: Number of transferred llis.
 353 * @lcla_alloc: Number of LCLA entries allocated.
 354 * @txd: DMA engine struct. Used for among other things for communication
 355 * during a transfer.
 356 * @node: List entry.
 357 * @is_in_client_list: true if the client owns this descriptor.
 358 * @cyclic: true if this is a cyclic job
 359 *
 360 * This descriptor is used for both logical and physical transfers.
 361 */
 362struct d40_desc {
 363	/* LLI physical */
 364	struct d40_phy_lli_bidir	 lli_phy;
 365	/* LLI logical */
 366	struct d40_log_lli_bidir	 lli_log;
 367
 368	struct d40_lli_pool		 lli_pool;
 369	int				 lli_len;
 370	int				 lli_current;
 371	int				 lcla_alloc;
 372
 373	struct dma_async_tx_descriptor	 txd;
 374	struct list_head		 node;
 375
 376	bool				 is_in_client_list;
 377	bool				 cyclic;
 378};
 379
 380/**
 381 * struct d40_lcla_pool - LCLA pool settings and data.
 382 *
 383 * @base: The virtual address of LCLA. 18 bit aligned.
 384 * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used.
 385 * This pointer is only there for clean-up on error.
 386 * @pages: The number of pages needed for all physical channels.
 387 * Only used later for clean-up on error
 388 * @lock: Lock to protect the content in this struct.
 389 * @alloc_map: big map over which LCLA entry is own by which job.
 390 */
 391struct d40_lcla_pool {
 392	void		*base;
 393	dma_addr_t	dma_addr;
 394	void		*base_unaligned;
 395	int		 pages;
 396	spinlock_t	 lock;
 397	struct d40_desc	**alloc_map;
 398};
 399
 400/**
 401 * struct d40_phy_res - struct for handling eventlines mapped to physical
 402 * channels.
 403 *
 404 * @lock: A lock protection this entity.
 405 * @reserved: True if used by secure world or otherwise.
 406 * @num: The physical channel number of this entity.
 407 * @allocated_src: Bit mapped to show which src event line's are mapped to
 408 * this physical channel. Can also be free or physically allocated.
 409 * @allocated_dst: Same as for src but is dst.
 410 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
 411 * event line number.
 412 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 413 */
 414struct d40_phy_res {
 415	spinlock_t lock;
 416	bool	   reserved;
 417	int	   num;
 418	u32	   allocated_src;
 419	u32	   allocated_dst;
 420	bool	   use_soft_lli;
 421};
 422
 423struct d40_base;
 424
 425/**
 426 * struct d40_chan - Struct that describes a channel.
 427 *
 428 * @lock: A spinlock to protect this struct.
 429 * @log_num: The logical number, if any of this channel.
 
 
 430 * @pending_tx: The number of pending transfers. Used between interrupt handler
 431 * and tasklet.
 432 * @busy: Set to true when transfer is ongoing on this channel.
 433 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 434 * point is NULL, then the channel is not allocated.
 435 * @chan: DMA engine handle.
 436 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 437 * transfer and call client callback.
 438 * @client: Cliented owned descriptor list.
 439 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 440 * @active: Active descriptor.
 441 * @done: Completed jobs
 442 * @queue: Queued jobs.
 443 * @prepare_queue: Prepared jobs.
 444 * @dma_cfg: The client configuration of this dma channel.
 445 * @configured: whether the dma_cfg configuration is valid
 446 * @base: Pointer to the device instance struct.
 447 * @src_def_cfg: Default cfg register setting for src.
 448 * @dst_def_cfg: Default cfg register setting for dst.
 449 * @log_def: Default logical channel settings.
 
 450 * @lcpa: Pointer to dst and src lcpa settings.
 451 * @runtime_addr: runtime configured address.
 452 * @runtime_direction: runtime configured direction.
 453 *
 454 * This struct can either "be" a logical or a physical channel.
 455 */
 456struct d40_chan {
 457	spinlock_t			 lock;
 458	int				 log_num;
 
 
 459	int				 pending_tx;
 460	bool				 busy;
 461	struct d40_phy_res		*phy_chan;
 462	struct dma_chan			 chan;
 463	struct tasklet_struct		 tasklet;
 464	struct list_head		 client;
 465	struct list_head		 pending_queue;
 466	struct list_head		 active;
 467	struct list_head		 done;
 468	struct list_head		 queue;
 469	struct list_head		 prepare_queue;
 470	struct stedma40_chan_cfg	 dma_cfg;
 471	bool				 configured;
 472	struct d40_base			*base;
 473	/* Default register configurations */
 474	u32				 src_def_cfg;
 475	u32				 dst_def_cfg;
 476	struct d40_def_lcsp		 log_def;
 477	struct d40_log_lli_full		*lcpa;
 478	/* Runtime reconfiguration */
 479	dma_addr_t			runtime_addr;
 480	enum dma_transfer_direction	runtime_direction;
 481};
 482
 483/**
 484 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 485 * controller
 486 *
 487 * @backup: the pointer to the registers address array for backup
 488 * @backup_size: the size of the registers address array for backup
 489 * @realtime_en: the realtime enable register
 490 * @realtime_clear: the realtime clear register
 491 * @high_prio_en: the high priority enable register
 492 * @high_prio_clear: the high priority clear register
 493 * @interrupt_en: the interrupt enable register
 494 * @interrupt_clear: the interrupt clear register
 495 * @il: the pointer to struct d40_interrupt_lookup
 496 * @il_size: the size of d40_interrupt_lookup array
 497 * @init_reg: the pointer to the struct d40_reg_val
 498 * @init_reg_size: the size of d40_reg_val array
 499 */
 500struct d40_gen_dmac {
 501	u32				*backup;
 502	u32				 backup_size;
 503	u32				 realtime_en;
 504	u32				 realtime_clear;
 505	u32				 high_prio_en;
 506	u32				 high_prio_clear;
 507	u32				 interrupt_en;
 508	u32				 interrupt_clear;
 509	struct d40_interrupt_lookup	*il;
 510	u32				 il_size;
 511	struct d40_reg_val		*init_reg;
 512	u32				 init_reg_size;
 513};
 514
 515/**
 516 * struct d40_base - The big global struct, one for each probe'd instance.
 517 *
 518 * @interrupt_lock: Lock used to make sure one interrupt is handle a time.
 519 * @execmd_lock: Lock for execute command usage since several channels share
 520 * the same physical register.
 521 * @dev: The device structure.
 522 * @virtbase: The virtual base address of the DMA's register.
 523 * @rev: silicon revision detected.
 524 * @clk: Pointer to the DMA clock structure.
 525 * @phy_start: Physical memory start of the DMA registers.
 526 * @phy_size: Size of the DMA register map.
 527 * @irq: The IRQ number.
 528 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 529 * transfers).
 530 * @num_phy_chans: The number of physical channels. Read from HW. This
 531 * is the number of available channels for this driver, not counting "Secure
 532 * mode" allocated physical channels.
 533 * @num_log_chans: The number of logical channels. Calculated from
 534 * num_phy_chans.
 535 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 536 * @dma_slave: dma_device channels that can do only do slave transfers.
 537 * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
 538 * @phy_chans: Room for all possible physical channels in system.
 539 * @log_chans: Room for all possible logical channels in system.
 540 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 541 * to log_chans entries.
 542 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 543 * to phy_chans entries.
 544 * @plat_data: Pointer to provided platform_data which is the driver
 545 * configuration.
 546 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 547 * @phy_res: Vector containing all physical channels.
 548 * @lcla_pool: lcla pool settings and data.
 549 * @lcpa_base: The virtual mapped address of LCPA.
 550 * @phy_lcpa: The physical address of the LCPA.
 551 * @lcpa_size: The size of the LCPA area.
 552 * @desc_slab: cache for descriptors.
 553 * @reg_val_backup: Here the values of some hardware registers are stored
 554 * before the DMA is powered off. They are restored when the power is back on.
 555 * @reg_val_backup_v4: Backup of registers that only exits on dma40 v3 and
 556 * later
 557 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 558 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 559 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 560 * DMA controller
 561 */
 562struct d40_base {
 563	spinlock_t			 interrupt_lock;
 564	spinlock_t			 execmd_lock;
 565	struct device			 *dev;
 566	void __iomem			 *virtbase;
 567	u8				  rev:4;
 568	struct clk			 *clk;
 569	phys_addr_t			  phy_start;
 570	resource_size_t			  phy_size;
 571	int				  irq;
 572	int				  num_memcpy_chans;
 573	int				  num_phy_chans;
 574	int				  num_log_chans;
 575	struct device_dma_parameters	  dma_parms;
 576	struct dma_device		  dma_both;
 577	struct dma_device		  dma_slave;
 578	struct dma_device		  dma_memcpy;
 579	struct d40_chan			 *phy_chans;
 580	struct d40_chan			 *log_chans;
 581	struct d40_chan			**lookup_log_chans;
 582	struct d40_chan			**lookup_phy_chans;
 583	struct stedma40_platform_data	 *plat_data;
 584	struct regulator		 *lcpa_regulator;
 585	/* Physical half channels */
 586	struct d40_phy_res		 *phy_res;
 587	struct d40_lcla_pool		  lcla_pool;
 588	void				 *lcpa_base;
 589	dma_addr_t			  phy_lcpa;
 590	resource_size_t			  lcpa_size;
 591	struct kmem_cache		 *desc_slab;
 592	u32				  reg_val_backup[BACKUP_REGS_SZ];
 593	u32				  reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
 594	u32				 *reg_val_backup_chan;
 595	u16				  gcc_pwr_off_mask;
 596	struct d40_gen_dmac		  gen_dmac;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 597};
 598
 599static struct device *chan2dev(struct d40_chan *d40c)
 600{
 601	return &d40c->chan.dev->device;
 602}
 603
 604static bool chan_is_physical(struct d40_chan *chan)
 605{
 606	return chan->log_num == D40_PHY_CHAN;
 607}
 608
 609static bool chan_is_logical(struct d40_chan *chan)
 610{
 611	return !chan_is_physical(chan);
 612}
 613
 614static void __iomem *chan_base(struct d40_chan *chan)
 615{
 616	return chan->base->virtbase + D40_DREG_PCBASE +
 617	       chan->phy_chan->num * D40_DREG_PCDELTA;
 618}
 619
 620#define d40_err(dev, format, arg...)		\
 621	dev_err(dev, "[%s] " format, __func__, ## arg)
 622
 623#define chan_err(d40c, format, arg...)		\
 624	d40_err(chan2dev(d40c), format, ## arg)
 625
 626static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
 627			      int lli_len)
 628{
 629	bool is_log = chan_is_logical(d40c);
 630	u32 align;
 631	void *base;
 632
 633	if (is_log)
 634		align = sizeof(struct d40_log_lli);
 635	else
 636		align = sizeof(struct d40_phy_lli);
 637
 638	if (lli_len == 1) {
 639		base = d40d->lli_pool.pre_alloc_lli;
 640		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
 641		d40d->lli_pool.base = NULL;
 642	} else {
 643		d40d->lli_pool.size = lli_len * 2 * align;
 644
 645		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
 646		d40d->lli_pool.base = base;
 647
 648		if (d40d->lli_pool.base == NULL)
 649			return -ENOMEM;
 650	}
 651
 652	if (is_log) {
 653		d40d->lli_log.src = PTR_ALIGN(base, align);
 654		d40d->lli_log.dst = d40d->lli_log.src + lli_len;
 655
 656		d40d->lli_pool.dma_addr = 0;
 657	} else {
 658		d40d->lli_phy.src = PTR_ALIGN(base, align);
 659		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
 660
 661		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
 662							 d40d->lli_phy.src,
 663							 d40d->lli_pool.size,
 664							 DMA_TO_DEVICE);
 665
 666		if (dma_mapping_error(d40c->base->dev,
 667				      d40d->lli_pool.dma_addr)) {
 668			kfree(d40d->lli_pool.base);
 669			d40d->lli_pool.base = NULL;
 670			d40d->lli_pool.dma_addr = 0;
 671			return -ENOMEM;
 672		}
 673	}
 674
 675	return 0;
 676}
 677
 678static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
 679{
 680	if (d40d->lli_pool.dma_addr)
 681		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
 682				 d40d->lli_pool.size, DMA_TO_DEVICE);
 683
 684	kfree(d40d->lli_pool.base);
 685	d40d->lli_pool.base = NULL;
 686	d40d->lli_pool.size = 0;
 687	d40d->lli_log.src = NULL;
 688	d40d->lli_log.dst = NULL;
 689	d40d->lli_phy.src = NULL;
 690	d40d->lli_phy.dst = NULL;
 691}
 692
 693static int d40_lcla_alloc_one(struct d40_chan *d40c,
 694			      struct d40_desc *d40d)
 695{
 696	unsigned long flags;
 697	int i;
 698	int ret = -EINVAL;
 699
 700	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
 701
 702	/*
 703	 * Allocate both src and dst at the same time; therefore the halves
 704	 * start at 1, since 0 cannot be used (zero is the end marker).
 705	 */
 706	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
 707		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
 708
 709		if (!d40c->base->lcla_pool.alloc_map[idx]) {
 710			d40c->base->lcla_pool.alloc_map[idx] = d40d;
 711			d40d->lcla_alloc++;
 712			ret = i;
 713			break;
 714		}
 715	}
 716
 717	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
 718
 719	return ret;
 720}
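/*
 * Worked example of the index math above (a reading aid, not driver code):
 * with D40_LCLA_LINK_PER_EVENT_GRP = 128, physical channel 3 owns
 * alloc_map slots 384..511.  Because src and dst are allocated together,
 * only the first half of the group is scanned, and slot 384 (i = 0) is
 * never handed out since zero doubles as the link end marker; the first
 * free slot in 385..447 wins.
 */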
 721
 722static int d40_lcla_free_all(struct d40_chan *d40c,
 723			     struct d40_desc *d40d)
 724{
 725	unsigned long flags;
 726	int i;
 727	int ret = -EINVAL;
 728
 729	if (chan_is_physical(d40c))
 730		return 0;
 731
 732	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
 733
 734	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
 735		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
 736
 737		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
 738			d40c->base->lcla_pool.alloc_map[idx] = NULL;
 739			d40d->lcla_alloc--;
 740			if (d40d->lcla_alloc == 0) {
 741				ret = 0;
 742				break;
 743			}
 744		}
 745	}
 746
 747	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
 748
 749	return ret;
 750
 751}
 752
 753static void d40_desc_remove(struct d40_desc *d40d)
 754{
 755	list_del(&d40d->node);
 756}
 757
 758static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 759{
 760	struct d40_desc *desc = NULL;
 761
 762	if (!list_empty(&d40c->client)) {
 763		struct d40_desc *d;
 764		struct d40_desc *_d;
 765
 766		list_for_each_entry_safe(d, _d, &d40c->client, node) {
 767			if (async_tx_test_ack(&d->txd)) {
 768				d40_desc_remove(d);
 769				desc = d;
 770				memset(desc, 0, sizeof(*desc));
 771				break;
 772			}
 773		}
 774	}
 775
 776	if (!desc)
 777		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
 778
 779	if (desc)
 780		INIT_LIST_HEAD(&desc->node);
 781
 782	return desc;
 783}
 784
 785static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
 786{
 787
 788	d40_pool_lli_free(d40c, d40d);
 789	d40_lcla_free_all(d40c, d40d);
 790	kmem_cache_free(d40c->base->desc_slab, d40d);
 791}
 792
 793static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
 794{
 795	list_add_tail(&desc->node, &d40c->active);
 796}
 797
 798static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
 799{
 800	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
 801	struct d40_phy_lli *lli_src = desc->lli_phy.src;
 802	void __iomem *base = chan_base(chan);
 803
 804	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
 805	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
 806	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
 807	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
 808
 809	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
 810	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
 811	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
 812	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
 813}
 814
 815static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
 816{
 817	list_add_tail(&desc->node, &d40c->done);
 818}
 819
 820static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 821{
 822	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
 823	struct d40_log_lli_bidir *lli = &desc->lli_log;
 824	int lli_current = desc->lli_current;
 825	int lli_len = desc->lli_len;
 826	bool cyclic = desc->cyclic;
 827	int curr_lcla = -EINVAL;
 828	int first_lcla = 0;
 829	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
 830	bool linkback;
 831
 832	/*
 833	 * We may have partially running cyclic transfers, in case we didn't get
 834	 * enough LCLA entries.
 835	 */
 836	linkback = cyclic && lli_current == 0;
 837
 838	/*
 839	 * For linkback, we need one LCLA even with only one link, because we
 840	 * can't link back to the one in LCPA space
 841	 */
 842	if (linkback || (lli_len - lli_current > 1)) {
 843		/*
 844		 * If the channel is expected to use only soft_lli, don't
 845		 * allocate an LCLA. This avoids a HW issue that exists in
 846		 * some controllers during a peripheral-to-memory transfer
 847		 * that uses linked lists.
 848		 */
 849		if (!(chan->phy_chan->use_soft_lli &&
 850			chan->dma_cfg.dir == DMA_DEV_TO_MEM))
 851			curr_lcla = d40_lcla_alloc_one(chan, desc);
 852
 853		first_lcla = curr_lcla;
 854	}
 855
 856	/*
 857	 * For linkback, we normally load the LCPA in the loop since we need to
 858	 * link it to the second LCLA and not the first.  However, if we
 859	 * couldn't even get a first LCLA, then we have to run in LCPA and
 860	 * reload manually.
 861	 */
 862	if (!linkback || curr_lcla == -EINVAL) {
 863		unsigned int flags = 0;
 864
 865		if (curr_lcla == -EINVAL)
 866			flags |= LLI_TERM_INT;
 867
 868		d40_log_lli_lcpa_write(chan->lcpa,
 869				       &lli->dst[lli_current],
 870				       &lli->src[lli_current],
 871				       curr_lcla,
 872				       flags);
 873		lli_current++;
 874	}
 875
 876	if (curr_lcla < 0)
 877		goto set_current;
 878
 879	for (; lli_current < lli_len; lli_current++) {
 880		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
 881					   8 * curr_lcla * 2;
 882		struct d40_log_lli *lcla = pool->base + lcla_offset;
 883		unsigned int flags = 0;
 884		int next_lcla;
 885
 886		if (lli_current + 1 < lli_len)
 887			next_lcla = d40_lcla_alloc_one(chan, desc);
 888		else
 889			next_lcla = linkback ? first_lcla : -EINVAL;
 890
 891		if (cyclic || next_lcla == -EINVAL)
 892			flags |= LLI_TERM_INT;
 893
 894		if (linkback && curr_lcla == first_lcla) {
 895			/* First link goes in both LCPA and LCLA */
 896			d40_log_lli_lcpa_write(chan->lcpa,
 897					       &lli->dst[lli_current],
 898					       &lli->src[lli_current],
 899					       next_lcla, flags);
 900		}
 901
 902		/*
 903		 * One unused LCLA in the cyclic case if the very first
 904		 * next_lcla fails...
 905		 */
 906		d40_log_lli_lcla_write(lcla,
 907				       &lli->dst[lli_current],
 908				       &lli->src[lli_current],
 909				       next_lcla, flags);
 910
 911		/*
 912		 * Cache maintenance is not needed if lcla is
 913		 * mapped in esram
 914		 */
 915		if (!use_esram_lcla) {
 916			dma_sync_single_range_for_device(chan->base->dev,
 917						pool->dma_addr, lcla_offset,
 918						2 * sizeof(struct d40_log_lli),
 919						DMA_TO_DEVICE);
 920		}
 921		curr_lcla = next_lcla;
 922
 923		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
 924			lli_current++;
 925			break;
 926		}
 927	}
 928 set_current:
 929	desc->lli_current = lli_current;
 930}
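/*
 * Worked example of the LCLA addressing above (a reading aid, not driver
 * code): each physical channel owns a 1 KiB slice of the LCLA area, and
 * each link slot holds a src/dst pair of 8-byte d40_log_lli entries,
 * hence the "8 * curr_lcla * 2" term.  For physical channel 2 and
 * curr_lcla = 3, the pair sits at byte offset 2 * 1024 + 48 = 2096 from
 * pool->base.
 */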
 931
 932static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
 933{
 934	if (chan_is_physical(d40c)) {
 935		d40_phy_lli_load(d40c, d40d);
 936		d40d->lli_current = d40d->lli_len;
 937	} else
 938		d40_log_lli_to_lcxa(d40c, d40d);
 939}
 940
 941static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
 942{
 943	return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
 944}
 945
 946/* remove desc from current queue and add it to the pending_queue */
 947static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 948{
 949	d40_desc_remove(desc);
 950	desc->is_in_client_list = false;
 951	list_add_tail(&desc->node, &d40c->pending_queue);
 952}
 953
 954static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
 955{
 956	return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
 957					node);
 958}
 959
 960static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
 961{
 962	return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
 963}
 964
 965static struct d40_desc *d40_first_done(struct d40_chan *d40c)
 966{
 967	return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
 968}
 969
 970static int d40_psize_2_burst_size(bool is_log, int psize)
 971{
 972	if (is_log) {
 973		if (psize == STEDMA40_PSIZE_LOG_1)
 974			return 1;
 975	} else {
 976		if (psize == STEDMA40_PSIZE_PHY_1)
 977			return 1;
 978	}
 979
 980	return 2 << psize;
 981}
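/*
 * Example of the mapping above (assuming the usual encoding in which
 * STEDMA40_PSIZE_*_2 through *_16 are 0 through 3): psize 1 yields a
 * burst of 2 << 1 = 4 elements, psize 3 a burst of 16, and the *_1
 * encodings are special-cased to a burst of a single element.
 */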
 982
 983/*
 984 * The DMA only supports transferring packets up to
 985 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
 986 *
 987 * Calculate the total number of dma elements required to send the entire sg list.
 988 */
 989static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
 990{
 991	int dmalen;
 992	u32 max_w = max(data_width1, data_width2);
 993	u32 min_w = min(data_width1, data_width2);
 994	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
 995
 996	if (seg_max > STEDMA40_MAX_SEG_SIZE)
 997		seg_max -= max_w;
 998
 999	if (!IS_ALIGNED(size, max_w))
1000		return -EINVAL;
1001
1002	if (size <= seg_max)
1003		dmalen = 1;
1004	else {
1005		dmalen = size / seg_max;
1006		if (dmalen * seg_max < size)
1007			dmalen++;
1008	}
1009	return dmalen;
1010}
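/*
 * Worked example (assuming STEDMA40_MAX_SEG_SIZE is 0xffff): for data
 * widths of 1 and 4 bytes, seg_max = ALIGN(0xffff * 1, 4) = 0x10000
 * overshoots and is trimmed to 0xfffc.  A 0x40000 byte buffer is 4-byte
 * aligned, and 4 * 0xfffc < 0x40000, so d40_size_2_dmalen() returns 5.
 */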
1011
1012static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
1013			   u32 data_width1, u32 data_width2)
1014{
1015	struct scatterlist *sg;
1016	int i;
1017	int len = 0;
1018	int ret;
1019
1020	for_each_sg(sgl, sg, sg_len, i) {
1021		ret = d40_size_2_dmalen(sg_dma_len(sg),
1022					data_width1, data_width2);
1023		if (ret < 0)
1024			return ret;
1025		len += ret;
1026	}
1027	return len;
1028}
1029
1030static int __d40_execute_command_phy(struct d40_chan *d40c,
1031				     enum d40_command command)
1032{
1033	u32 status;
1034	int i;
1035	void __iomem *active_reg;
1036	int ret = 0;
1037	unsigned long flags;
1038	u32 wmask;
1039
1040	if (command == D40_DMA_STOP) {
1041		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1042		if (ret)
1043			return ret;
1044	}
1045
1046	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1047
1048	if (d40c->phy_chan->num % 2 == 0)
1049		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1050	else
1051		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1052
1053	if (command == D40_DMA_SUSPEND_REQ) {
1054		status = (readl(active_reg) &
1055			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1056			D40_CHAN_POS(d40c->phy_chan->num);
1057
1058		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1059			goto unlock;
1060	}
1061
1062	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1063	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1064	       active_reg);
1065
1066	if (command == D40_DMA_SUSPEND_REQ) {
1067
1068		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1069			status = (readl(active_reg) &
1070				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1071				D40_CHAN_POS(d40c->phy_chan->num);
1072
1073			cpu_relax();
1074			/*
1075			 * Reduce the number of bus accesses while
1076			 * waiting for the DMA to suspend.
1077			 */
1078			udelay(3);
1079
1080			if (status == D40_DMA_STOP ||
1081			    status == D40_DMA_SUSPENDED)
1082				break;
1083		}
1084
1085		if (i == D40_SUSPEND_MAX_IT) {
1086			chan_err(d40c,
1087				"unable to suspend the chl %d (log: %d) status %x\n",
1088				d40c->phy_chan->num, d40c->log_num,
1089				status);
1090			dump_stack();
1091			ret = -EBUSY;
1092		}
1093
1094	}
1095 unlock:
1096	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1097	return ret;
1098}
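/*
 * Example of the ACTIVE/ACTIVO packing used above (a reading aid, not
 * driver code): each physical channel owns a 2-bit command/status field,
 * so for channel 5 D40_CHAN_POS() is 4 and D40_CHAN_POS_MASK() is 0x30.
 * Issuing D40_DMA_RUN then writes (0xffffffff & ~0x30) | (1 << 4), i.e.
 * all-ones in every other channel's field, which the hardware apparently
 * treats as a no-op for the untouched channels.
 */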
1099
1100static void d40_term_all(struct d40_chan *d40c)
1101{
1102	struct d40_desc *d40d;
1103	struct d40_desc *_d;
1104
1105	/* Release completed descriptors */
1106	while ((d40d = d40_first_done(d40c))) {
1107		d40_desc_remove(d40d);
1108		d40_desc_free(d40c, d40d);
1109	}
1110
1111	/* Release active descriptors */
1112	while ((d40d = d40_first_active_get(d40c))) {
1113		d40_desc_remove(d40d);
1114		d40_desc_free(d40c, d40d);
1115	}
1116
1117	/* Release queued descriptors waiting for transfer */
1118	while ((d40d = d40_first_queued(d40c))) {
1119		d40_desc_remove(d40d);
1120		d40_desc_free(d40c, d40d);
1121	}
1122
1123	/* Release pending descriptors */
1124	while ((d40d = d40_first_pending(d40c))) {
1125		d40_desc_remove(d40d);
1126		d40_desc_free(d40c, d40d);
1127	}
1128
1129	/* Release client owned descriptors */
1130	if (!list_empty(&d40c->client))
1131		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1132			d40_desc_remove(d40d);
1133			d40_desc_free(d40c, d40d);
1134		}
1135
1136	/* Release descriptors in prepare queue */
1137	if (!list_empty(&d40c->prepare_queue))
1138		list_for_each_entry_safe(d40d, _d,
1139					 &d40c->prepare_queue, node) {
1140			d40_desc_remove(d40d);
1141			d40_desc_free(d40c, d40d);
1142		}
1143
1144	d40c->pending_tx = 0;
1145}
1146
1147static void __d40_config_set_event(struct d40_chan *d40c,
1148				   enum d40_events event_type, u32 event,
1149				   int reg)
1150{
1151	void __iomem *addr = chan_base(d40c) + reg;
1152	int tries;
1153	u32 status;
1154
1155	switch (event_type) {
1156
1157	case D40_DEACTIVATE_EVENTLINE:
1158
1159		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1160		       | ~D40_EVENTLINE_MASK(event), addr);
1161		break;
1162
1163	case D40_SUSPEND_REQ_EVENTLINE:
1164		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1165			  D40_EVENTLINE_POS(event);
1166
1167		if (status == D40_DEACTIVATE_EVENTLINE ||
1168		    status == D40_SUSPEND_REQ_EVENTLINE)
1169			break;
1170
1171		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1172		       | ~D40_EVENTLINE_MASK(event), addr);
1173
1174		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1175
1176			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1177				  D40_EVENTLINE_POS(event);
1178
1179			cpu_relax();
1180			/*
1181			 * Reduce the number of bus accesses while
1182			 * waiting for the DMA to suspend.
1183			 */
1184			udelay(3);
1185
1186			if (status == D40_DEACTIVATE_EVENTLINE)
1187				break;
1188		}
1189
1190		if (tries == D40_SUSPEND_MAX_IT) {
1191			chan_err(d40c,
1192				"unable to stop the event_line chl %d (log: %d) "
1193				"status %x\n", d40c->phy_chan->num,
1194				 d40c->log_num, status);
1195		}
1196		break;
1197
1198	case D40_ACTIVATE_EVENTLINE:
1199	/*
1200	 * The hardware sometimes doesn't register the enable when src and dst
1201	 * event lines are active on the same logical channel.  Retry to ensure
1202	 * it does.  Usually only one retry is sufficient.
1203	 */
1204		tries = 100;
1205		while (--tries) {
1206			writel((D40_ACTIVATE_EVENTLINE <<
1207				D40_EVENTLINE_POS(event)) |
1208				~D40_EVENTLINE_MASK(event), addr);
1209
1210			if (readl(addr) & D40_EVENTLINE_MASK(event))
1211				break;
1212		}
1213
1214		if (tries != 99)
1215			dev_dbg(chan2dev(d40c),
1216				"[%s] workaround enable S%cLNK (%d tries)\n",
1217				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1218				100 - tries);
1219
1220		WARN_ON(!tries);
1221		break;
1222
1223	case D40_ROUND_EVENTLINE:
1224		BUG();
1225		break;
1226
1227	}
1228}
1229
1230static void d40_config_set_event(struct d40_chan *d40c,
1231				 enum d40_events event_type)
1232{
1233	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1234
1235	/* Enable event line connected to device (or memcpy) */
1236	if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1237	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1238		__d40_config_set_event(d40c, event_type, event,
1239				       D40_CHAN_REG_SSLNK);
1240
1241	if (d40c->dma_cfg.dir !=  DMA_DEV_TO_MEM)
1242		__d40_config_set_event(d40c, event_type, event,
1243				       D40_CHAN_REG_SDLNK);
1244}
1245
1246static u32 d40_chan_has_events(struct d40_chan *d40c)
1247{
1248	void __iomem *chanbase = chan_base(d40c);
1249	u32 val;
1250
1251	val = readl(chanbase + D40_CHAN_REG_SSLNK);
1252	val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1253
1254	return val;
1255}
1256
1257static int
1258__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1259{
1260	unsigned long flags;
1261	int ret = 0;
1262	u32 active_status;
1263	void __iomem *active_reg;
1264
1265	if (d40c->phy_chan->num % 2 == 0)
1266		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1267	else
1268		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1269
1270
1271	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1272
1273	switch (command) {
1274	case D40_DMA_STOP:
1275	case D40_DMA_SUSPEND_REQ:
1276
1277		active_status = (readl(active_reg) &
1278				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1279				 D40_CHAN_POS(d40c->phy_chan->num);
1280
1281		if (active_status == D40_DMA_RUN)
1282			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1283		else
1284			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1285
1286		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1287			ret = __d40_execute_command_phy(d40c, command);
1288
1289		break;
1290
1291	case D40_DMA_RUN:
1292
1293		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1294		ret = __d40_execute_command_phy(d40c, command);
1295		break;
1296
1297	case D40_DMA_SUSPENDED:
1298		BUG();
1299		break;
1300	}
1301
1302	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1303	return ret;
1304}
1305
1306static int d40_channel_execute_command(struct d40_chan *d40c,
1307				       enum d40_command command)
1308{
1309	if (chan_is_logical(d40c))
1310		return __d40_execute_command_log(d40c, command);
1311	else
1312		return __d40_execute_command_phy(d40c, command);
1313}
1314
1315static u32 d40_get_prmo(struct d40_chan *d40c)
1316{
1317	static const unsigned int phy_map[] = {
1318		[STEDMA40_PCHAN_BASIC_MODE]
1319			= D40_DREG_PRMO_PCHAN_BASIC,
1320		[STEDMA40_PCHAN_MODULO_MODE]
1321			= D40_DREG_PRMO_PCHAN_MODULO,
1322		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
1323			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1324	};
1325	static const unsigned int log_map[] = {
1326		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1327			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1328		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1329			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1330		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1331			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1332	};
1333
1334	if (chan_is_physical(d40c))
1335		return phy_map[d40c->dma_cfg.mode_opt];
1336	else
1337		return log_map[d40c->dma_cfg.mode_opt];
1338}
1339
1340static void d40_config_write(struct d40_chan *d40c)
1341{
1342	u32 addr_base;
1343	u32 var;
1344
1345	/* Odd addresses are even addresses + 4 */
1346	addr_base = (d40c->phy_chan->num % 2) * 4;
1347	/* Setup channel mode to logical or physical */
1348	var = ((u32)(chan_is_logical(d40c)) + 1) <<
1349		D40_CHAN_POS(d40c->phy_chan->num);
1350	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1351
1352	/* Setup operational mode option register */
1353	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1354
1355	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1356
1357	if (chan_is_logical(d40c)) {
1358		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1359			   & D40_SREG_ELEM_LOG_LIDX_MASK;
1360		void __iomem *chanbase = chan_base(d40c);
1361
1362		/* Set default config for CFG reg */
1363		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1364		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1365
1366		/* Set LIDX for lcla */
1367		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1368		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1369
1370		/* Clear LNK which will be used by d40_chan_has_events() */
1371		writel(0, chanbase + D40_CHAN_REG_SSLNK);
1372		writel(0, chanbase + D40_CHAN_REG_SDLNK);
1373	}
1374}
1375
1376static u32 d40_residue(struct d40_chan *d40c)
1377{
1378	u32 num_elt;
1379
1380	if (chan_is_logical(d40c))
1381		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1382			>> D40_MEM_LCSP2_ECNT_POS;
1383	else {
1384		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1385		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1386			  >> D40_SREG_ELEM_PHY_ECNT_POS;
1387	}
1388
1389	return num_elt * d40c->dma_cfg.dst_info.data_width;
1390}
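/*
 * Example: if ECNT reads back 12 outstanding elements on a channel whose
 * destination data width is 4 bytes, d40_residue() reports 12 * 4 = 48
 * bytes left to transfer.
 */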
1391
1392static bool d40_tx_is_linked(struct d40_chan *d40c)
1393{
1394	bool is_link;
1395
1396	if (chan_is_logical(d40c))
1397		is_link = readl(&d40c->lcpa->lcsp3) &  D40_MEM_LCSP3_DLOS_MASK;
1398	else
1399		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1400			  & D40_SREG_LNK_PHYS_LNK_MASK;
1401
1402	return is_link;
1403}
1404
1405static int d40_pause(struct dma_chan *chan)
1406{
1407	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1408	int res = 0;
1409	unsigned long flags;
1410
1411	if (d40c->phy_chan == NULL) {
1412		chan_err(d40c, "Channel is not allocated!\n");
1413		return -EINVAL;
1414	}
1415
1416	if (!d40c->busy)
1417		return 0;
1418
1419	spin_lock_irqsave(&d40c->lock, flags);
1420	pm_runtime_get_sync(d40c->base->dev);
1421
1422	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1423
1424	pm_runtime_mark_last_busy(d40c->base->dev);
1425	pm_runtime_put_autosuspend(d40c->base->dev);
1426	spin_unlock_irqrestore(&d40c->lock, flags);
1427	return res;
1428}
1429
1430static int d40_resume(struct dma_chan *chan)
1431{
1432	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1433	int res = 0;
1434	unsigned long flags;
1435
1436	if (d40c->phy_chan == NULL) {
1437		chan_err(d40c, "Channel is not allocated!\n");
1438		return -EINVAL;
1439	}
1440
1441	if (!d40c->busy)
1442		return 0;
1443
1444	spin_lock_irqsave(&d40c->lock, flags);
1445	pm_runtime_get_sync(d40c->base->dev);
1446
1447	/* If there are bytes left to transfer, or a linked tx, resume the job */
1448	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1449		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1450
1451	pm_runtime_mark_last_busy(d40c->base->dev);
1452	pm_runtime_put_autosuspend(d40c->base->dev);
1453	spin_unlock_irqrestore(&d40c->lock, flags);
1454	return res;
1455}
1456
1457static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1458{
1459	struct d40_chan *d40c = container_of(tx->chan,
1460					     struct d40_chan,
1461					     chan);
1462	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1463	unsigned long flags;
1464	dma_cookie_t cookie;
1465
1466	spin_lock_irqsave(&d40c->lock, flags);
1467	cookie = dma_cookie_assign(tx);
1468	d40_desc_queue(d40c, d40d);
1469	spin_unlock_irqrestore(&d40c->lock, flags);
1470
1471	return cookie;
1472}
1473
1474static int d40_start(struct d40_chan *d40c)
1475{
1476	return d40_channel_execute_command(d40c, D40_DMA_RUN);
1477}
1478
1479static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1480{
1481	struct d40_desc *d40d;
1482	int err;
1483
1484	/* Start queued jobs, if any */
1485	d40d = d40_first_queued(d40c);
1486
1487	if (d40d != NULL) {
1488		if (!d40c->busy) {
1489			d40c->busy = true;
1490			pm_runtime_get_sync(d40c->base->dev);
1491		}
1492
1493		/* Remove from queue */
1494		d40_desc_remove(d40d);
1495
1496		/* Add to active queue */
1497		d40_desc_submit(d40c, d40d);
1498
1499		/* Initiate DMA job */
1500		d40_desc_load(d40c, d40d);
1501
1502		/* Start dma job */
1503		err = d40_start(d40c);
1504
1505		if (err)
1506			return NULL;
1507	}
1508
1509	return d40d;
1510}
1511
1512/* called from interrupt context */
1513static void dma_tc_handle(struct d40_chan *d40c)
1514{
1515	struct d40_desc *d40d;
1516
1517	/* Get first active entry from list */
1518	d40d = d40_first_active_get(d40c);
1519
1520	if (d40d == NULL)
1521		return;
1522
1523	if (d40d->cyclic) {
1524		/*
1525		 * If this was a partially loaded list, we need to reload
1526		 * it, and only once the list is completed.  We need to check
1527		 * for done because the interrupt will hit for every link, and
1528		 * not just the last one.
1529		 */
1530		if (d40d->lli_current < d40d->lli_len
1531		    && !d40_tx_is_linked(d40c)
1532		    && !d40_residue(d40c)) {
1533			d40_lcla_free_all(d40c, d40d);
1534			d40_desc_load(d40c, d40d);
1535			(void) d40_start(d40c);
1536
1537			if (d40d->lli_current == d40d->lli_len)
1538				d40d->lli_current = 0;
1539		}
1540	} else {
1541		d40_lcla_free_all(d40c, d40d);
1542
1543		if (d40d->lli_current < d40d->lli_len) {
1544			d40_desc_load(d40c, d40d);
1545			/* Start dma job */
1546			(void) d40_start(d40c);
1547			return;
1548		}
1549
1550		if (d40_queue_start(d40c) == NULL) {
1551			d40c->busy = false;
1552
1553			pm_runtime_mark_last_busy(d40c->base->dev);
1554			pm_runtime_put_autosuspend(d40c->base->dev);
1555		}
1556
1557		d40_desc_remove(d40d);
1558		d40_desc_done(d40c, d40d);
1559	}
1560
1561	d40c->pending_tx++;
1562	tasklet_schedule(&d40c->tasklet);
1563
1564}
1565
1566static void dma_tasklet(unsigned long data)
1567{
1568	struct d40_chan *d40c = (struct d40_chan *) data;
1569	struct d40_desc *d40d;
1570	unsigned long flags;
1571	bool callback_active;
1572	struct dmaengine_desc_callback cb;
1573
1574	spin_lock_irqsave(&d40c->lock, flags);
1575
1576	/* Get first entry from the done list */
1577	d40d = d40_first_done(d40c);
1578	if (d40d == NULL) {
1579		/* Check if we have reached here for a cyclic job */
1580		d40d = d40_first_active_get(d40c);
1581		if (d40d == NULL || !d40d->cyclic)
1582			goto check_pending_tx;
1583	}
1584
1585	if (!d40d->cyclic)
1586		dma_cookie_complete(&d40d->txd);
1587
1588	/*
1589	 * If terminating a channel, pending_tx is set to zero.
1590	 * This prevents any finished active jobs from returning to the client.
1591	 */
1592	if (d40c->pending_tx == 0) {
1593		spin_unlock_irqrestore(&d40c->lock, flags);
1594		return;
1595	}
1596
1597	/* Callback to client */
1598	callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1599	dmaengine_desc_get_callback(&d40d->txd, &cb);
1600
1601	if (!d40d->cyclic) {
1602		if (async_tx_test_ack(&d40d->txd)) {
1603			d40_desc_remove(d40d);
1604			d40_desc_free(d40c, d40d);
1605		} else if (!d40d->is_in_client_list) {
1606			d40_desc_remove(d40d);
1607			d40_lcla_free_all(d40c, d40d);
1608			list_add_tail(&d40d->node, &d40c->client);
1609			d40d->is_in_client_list = true;
1610		}
1611	}
1612
1613	d40c->pending_tx--;
1614
1615	if (d40c->pending_tx)
1616		tasklet_schedule(&d40c->tasklet);
1617
1618	spin_unlock_irqrestore(&d40c->lock, flags);
1619
1620	if (callback_active)
1621		dmaengine_desc_callback_invoke(&cb, NULL);
1622
1623	return;
1624 check_pending_tx:
1625	/* Rescue maneuver if receiving double interrupts */
1626	if (d40c->pending_tx > 0)
1627		d40c->pending_tx--;
1628	spin_unlock_irqrestore(&d40c->lock, flags);
1629}
1630
1631static irqreturn_t d40_handle_interrupt(int irq, void *data)
1632{
1633	int i;
1634	u32 idx;
1635	u32 row;
1636	long chan = -1;
1637	struct d40_chan *d40c;
1638	unsigned long flags;
1639	struct d40_base *base = data;
1640	u32 regs[base->gen_dmac.il_size];
1641	struct d40_interrupt_lookup *il = base->gen_dmac.il;
1642	u32 il_size = base->gen_dmac.il_size;
1643
1644	spin_lock_irqsave(&base->interrupt_lock, flags);
1645
1646	/* Read interrupt status of both logical and physical channels */
1647	for (i = 0; i < il_size; i++)
1648		regs[i] = readl(base->virtbase + il[i].src);
1649
1650	for (;;) {
1651
1652		chan = find_next_bit((unsigned long *)regs,
1653				     BITS_PER_LONG * il_size, chan + 1);
1654
1655		/* No more set bits found? */
1656		if (chan == BITS_PER_LONG * il_size)
1657			break;
1658
1659		row = chan / BITS_PER_LONG;
1660		idx = chan & (BITS_PER_LONG - 1);
1661
1662		if (il[row].offset == D40_PHY_CHAN)
1663			d40c = base->lookup_phy_chans[idx];
1664		else
1665			d40c = base->lookup_log_chans[il[row].offset + idx];
1666
1667		if (!d40c) {
1668			/*
1669			 * No error because this can happen if something else
1670			 * in the system is using the channel.
1671			 */
1672			continue;
1673		}
1674
1675		/* ACK interrupt */
1676		writel(BIT(idx), base->virtbase + il[row].clr);
1677
1678		spin_lock(&d40c->lock);
1679
1680		if (!il[row].is_error)
1681			dma_tc_handle(d40c);
1682		else
1683			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1684				chan, il[row].offset, idx);
1685
1686		spin_unlock(&d40c->lock);
1687	}
1688
1689	spin_unlock_irqrestore(&base->interrupt_lock, flags);
1690
1691	return IRQ_HANDLED;
1692}
1693
1694static int d40_validate_conf(struct d40_chan *d40c,
1695			     struct stedma40_chan_cfg *conf)
1696{
1697	int res = 0;
1698	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1699
1700	if (!conf->dir) {
1701		chan_err(d40c, "Invalid direction.\n");
1702		res = -EINVAL;
1703	}
1704
1705	if ((is_log && conf->dev_type > d40c->base->num_log_chans)  ||
1706	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1707	    (conf->dev_type < 0)) {
1708		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1709		res = -EINVAL;
1710	}
1711
1712	if (conf->dir == DMA_DEV_TO_DEV) {
1713		/*
1714		 * The DMAC HW supports it; it will be added to this driver
1715		 * if any dma client requires it.
1716		 */
1717		chan_err(d40c, "periph to periph not supported\n");
1718		res = -EINVAL;
1719	}
1720
1721	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1722	    conf->src_info.data_width !=
1723	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1724	    conf->dst_info.data_width) {
1725		/*
1726		 * The DMAC hardware only supports
1727		 * src (burst x width) == dst (burst x width)
1728		 */
1729
1730		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1731		res = -EINVAL;
1732	}
1733
1734	return res;
1735}
1736
1737static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1738			       bool is_src, int log_event_line, bool is_log,
1739			       bool *first_user)
1740{
1741	unsigned long flags;
1742	spin_lock_irqsave(&phy->lock, flags);
1743
1744	*first_user = ((phy->allocated_src | phy->allocated_dst)
1745			== D40_ALLOC_FREE);
1746
1747	if (!is_log) {
1748		/* Physical interrupts are masked per physical full channel */
1749		if (phy->allocated_src == D40_ALLOC_FREE &&
1750		    phy->allocated_dst == D40_ALLOC_FREE) {
1751			phy->allocated_dst = D40_ALLOC_PHY;
1752			phy->allocated_src = D40_ALLOC_PHY;
1753			goto found_unlock;
1754		} else
1755			goto not_found_unlock;
1756	}
1757
1758	/* Logical channel */
1759	if (is_src) {
1760		if (phy->allocated_src == D40_ALLOC_PHY)
1761			goto not_found_unlock;
1762
1763		if (phy->allocated_src == D40_ALLOC_FREE)
1764			phy->allocated_src = D40_ALLOC_LOG_FREE;
1765
1766		if (!(phy->allocated_src & BIT(log_event_line))) {
1767			phy->allocated_src |= BIT(log_event_line);
1768			goto found_unlock;
1769		} else
1770			goto not_found_unlock;
1771	} else {
1772		if (phy->allocated_dst == D40_ALLOC_PHY)
1773			goto not_found_unlock;
1774
1775		if (phy->allocated_dst == D40_ALLOC_FREE)
1776			phy->allocated_dst = D40_ALLOC_LOG_FREE;
1777
1778		if (!(phy->allocated_dst & BIT(log_event_line))) {
1779			phy->allocated_dst |= BIT(log_event_line);
1780			goto found_unlock;
1781		}
1782	}
1783 not_found_unlock:
1784	spin_unlock_irqrestore(&phy->lock, flags);
1785	return false;
1786 found_unlock:
1787	spin_unlock_irqrestore(&phy->lock, flags);
1788	return true;
1789}
1790
1791static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1792			       int log_event_line)
1793{
1794	unsigned long flags;
1795	bool is_free = false;
1796
1797	spin_lock_irqsave(&phy->lock, flags);
1798	if (!log_event_line) {
1799		phy->allocated_dst = D40_ALLOC_FREE;
1800		phy->allocated_src = D40_ALLOC_FREE;
1801		is_free = true;
1802		goto unlock;
1803	}
1804
1805	/* Logical channel */
1806	if (is_src) {
1807		phy->allocated_src &= ~BIT(log_event_line);
1808		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1809			phy->allocated_src = D40_ALLOC_FREE;
1810	} else {
1811		phy->allocated_dst &= ~BIT(log_event_line);
1812		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1813			phy->allocated_dst = D40_ALLOC_FREE;
1814	}
1815
1816	is_free = ((phy->allocated_src | phy->allocated_dst) ==
1817		   D40_ALLOC_FREE);
1818 unlock:
1819	spin_unlock_irqrestore(&phy->lock, flags);
1820
1821	return is_free;
1822}
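/*
 * Example of the allocation-map encoding used by the two helpers above:
 * a half channel is either D40_ALLOC_FREE (bit 31), D40_ALLOC_PHY
 * (bit 30), or a set of logical event line bits.  Taking event line 5 on
 * a free source half turns allocated_src from D40_ALLOC_FREE into
 * D40_ALLOC_LOG_FREE (0) and sets BIT(5); freeing it clears BIT(5) and,
 * once the mask is back to D40_ALLOC_LOG_FREE, marks the half
 * D40_ALLOC_FREE again.
 */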
1823
1824static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1825{
1826	int dev_type = d40c->dma_cfg.dev_type;
1827	int event_group;
1828	int event_line;
1829	struct d40_phy_res *phys;
1830	int i;
1831	int j;
1832	int log_num;
1833	int num_phy_chans;
1834	bool is_src;
1835	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1836
1837	phys = d40c->base->phy_res;
1838	num_phy_chans = d40c->base->num_phy_chans;
1839
1840	if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1841		log_num = 2 * dev_type;
1842		is_src = true;
1843	} else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1844		   d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1845		/* dst event lines are used for logical memcpy */
1846		log_num = 2 * dev_type + 1;
1847		is_src = false;
1848	} else
1849		return -EINVAL;
1850
1851	event_group = D40_TYPE_TO_GROUP(dev_type);
1852	event_line = D40_TYPE_TO_EVENT(dev_type);
1853
1854	if (!is_log) {
1855		if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1856			/* Find physical half channel */
1857			if (d40c->dma_cfg.use_fixed_channel) {
1858				i = d40c->dma_cfg.phy_channel;
1859				if (d40_alloc_mask_set(&phys[i], is_src,
1860						       0, is_log,
1861						       first_phy_user))
1862					goto found_phy;
1863			} else {
1864				for (i = 0; i < num_phy_chans; i++) {
1865					if (d40_alloc_mask_set(&phys[i], is_src,
1866						       0, is_log,
1867						       first_phy_user))
1868						goto found_phy;
1869				}
1870			}
1871		} else
1872			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1873				int phy_num = j  + event_group * 2;
1874				for (i = phy_num; i < phy_num + 2; i++) {
1875					if (d40_alloc_mask_set(&phys[i],
1876							       is_src,
1877							       0,
1878							       is_log,
1879							       first_phy_user))
1880						goto found_phy;
1881				}
1882			}
1883		return -EINVAL;
1884found_phy:
1885		d40c->phy_chan = &phys[i];
1886		d40c->log_num = D40_PHY_CHAN;
1887		goto out;
1888	}
1889	if (dev_type == -1)
1890		return -EINVAL;
1891
1892	/* Find logical channel */
1893	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1894		int phy_num = j + event_group * 2;
1895
1896		if (d40c->dma_cfg.use_fixed_channel) {
1897			i = d40c->dma_cfg.phy_channel;
1898
1899			if ((i != phy_num) && (i != phy_num + 1)) {
1900				dev_err(chan2dev(d40c),
1901					"invalid fixed phy channel %d\n", i);
1902				return -EINVAL;
1903			}
1904
1905			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1906					       is_log, first_phy_user))
1907				goto found_log;
1908
1909			dev_err(chan2dev(d40c),
1910				"could not allocate fixed phy channel %d\n", i);
1911			return -EINVAL;
1912		}
1913
1914		/*
1915		 * Spread logical channels across all available physical
1916		 * channels rather than packing every logical channel onto
1917		 * the first available one.
1918		 */
1919		if (is_src) {
1920			for (i = phy_num; i < phy_num + 2; i++) {
1921				if (d40_alloc_mask_set(&phys[i], is_src,
1922						       event_line, is_log,
1923						       first_phy_user))
1924					goto found_log;
1925			}
1926		} else {
1927			for (i = phy_num + 1; i >= phy_num; i--) {
1928				if (d40_alloc_mask_set(&phys[i], is_src,
1929						       event_line, is_log,
1930						       first_phy_user))
1931					goto found_log;
1932			}
1933		}
1934	}
1935	return -EINVAL;
1936
1937found_log:
1938	d40c->phy_chan = &phys[i];
1939	d40c->log_num = log_num;
1940out:
1941
1942	if (is_log)
1943		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1944	else
1945		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1946
1947	return 0;
1948
1949}
1950
1951static int d40_config_memcpy(struct d40_chan *d40c)
1952{
1953	dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1954
1955	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1956		d40c->dma_cfg = dma40_memcpy_conf_log;
1957		d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
1958
1959		d40_log_cfg(&d40c->dma_cfg,
1960			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1961
1962	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
1963		   dma_has_cap(DMA_SLAVE, cap)) {
1964		d40c->dma_cfg = dma40_memcpy_conf_phy;
1965
1966		/* Generate interrupt at end of transfer or relink. */
1967		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
1968
1969		/* Generate interrupt on error. */
1970		d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1971		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
1972
1973	} else {
1974		chan_err(d40c, "No memcpy\n");
1975		return -EINVAL;
1976	}
1977
1978	return 0;
1979}
1980
1981static int d40_free_dma(struct d40_chan *d40c)
1982{
1983
1984	int res = 0;
1985	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1986	struct d40_phy_res *phy = d40c->phy_chan;
1987	bool is_src;
1988
1989	/* Terminate all queued and active transfers */
1990	d40_term_all(d40c);
1991
1992	if (phy == NULL) {
1993		chan_err(d40c, "phy == null\n");
1994		return -EINVAL;
1995	}
1996
1997	if (phy->allocated_src == D40_ALLOC_FREE &&
1998	    phy->allocated_dst == D40_ALLOC_FREE) {
1999		chan_err(d40c, "channel already free\n");
2000		return -EINVAL;
2001	}
2002
2003	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2004	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2005		is_src = false;
2006	else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2007		is_src = true;
2008	else {
2009		chan_err(d40c, "Unknown direction\n");
2010		return -EINVAL;
2011	}
2012
2013	pm_runtime_get_sync(d40c->base->dev);
2014	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2015	if (res) {
2016		chan_err(d40c, "stop failed\n");
2017		goto mark_last_busy;
2018	}
2019
2020	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2021
2022	if (chan_is_logical(d40c))
2023		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2024	else
2025		d40c->base->lookup_phy_chans[phy->num] = NULL;
2026
2027	if (d40c->busy) {
2028		pm_runtime_mark_last_busy(d40c->base->dev);
2029		pm_runtime_put_autosuspend(d40c->base->dev);
2030	}
2031
2032	d40c->busy = false;
2033	d40c->phy_chan = NULL;
2034	d40c->configured = false;
2035 mark_last_busy:
2036	pm_runtime_mark_last_busy(d40c->base->dev);
2037	pm_runtime_put_autosuspend(d40c->base->dev);
2038	return res;
2039}
2040
2041static bool d40_is_paused(struct d40_chan *d40c)
2042{
2043	void __iomem *chanbase = chan_base(d40c);
2044	bool is_paused = false;
2045	unsigned long flags;
2046	void __iomem *active_reg;
2047	u32 status;
2048	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2049
2050	spin_lock_irqsave(&d40c->lock, flags);
2051
2052	if (chan_is_physical(d40c)) {
2053		if (d40c->phy_chan->num % 2 == 0)
2054			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2055		else
2056			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2057
2058		status = (readl(active_reg) &
2059			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2060			D40_CHAN_POS(d40c->phy_chan->num);
2061		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2062			is_paused = true;
2063		goto unlock;
2064	}
2065
2066	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2067	    d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2068		status = readl(chanbase + D40_CHAN_REG_SDLNK);
2069	} else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2070		status = readl(chanbase + D40_CHAN_REG_SSLNK);
2071	} else {
2072		chan_err(d40c, "Unknown direction\n");
2073		goto unlock;
2074	}
2075
2076	status = (status & D40_EVENTLINE_MASK(event)) >>
2077		D40_EVENTLINE_POS(event);
2078
2079	if (status != D40_DMA_RUN)
2080		is_paused = true;
2081 unlock:
2082	spin_unlock_irqrestore(&d40c->lock, flags);
2083	return is_paused;
2084
2085}
2086
2087static u32 stedma40_residue(struct dma_chan *chan)
2088{
2089	struct d40_chan *d40c =
2090		container_of(chan, struct d40_chan, chan);
2091	u32 bytes_left;
2092	unsigned long flags;
2093
2094	spin_lock_irqsave(&d40c->lock, flags);
2095	bytes_left = d40_residue(d40c);
2096	spin_unlock_irqrestore(&d40c->lock, flags);
2097
2098	return bytes_left;
2099}
2100
2101static int
2102d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2103		struct scatterlist *sg_src, struct scatterlist *sg_dst,
2104		unsigned int sg_len, dma_addr_t src_dev_addr,
2105		dma_addr_t dst_dev_addr)
2106{
2107	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2108	struct stedma40_half_channel_info *src_info = &cfg->src_info;
2109	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2110	int ret;
2111
2112	ret = d40_log_sg_to_lli(sg_src, sg_len,
2113				src_dev_addr,
2114				desc->lli_log.src,
2115				chan->log_def.lcsp1,
2116				src_info->data_width,
2117				dst_info->data_width);
2118
2119	ret = d40_log_sg_to_lli(sg_dst, sg_len,
2120				dst_dev_addr,
2121				desc->lli_log.dst,
2122				chan->log_def.lcsp3,
2123				dst_info->data_width,
2124				src_info->data_width);
2125
2126	return ret < 0 ? ret : 0;
2127}
2128
2129static int
2130d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2131		struct scatterlist *sg_src, struct scatterlist *sg_dst,
2132		unsigned int sg_len, dma_addr_t src_dev_addr,
2133		dma_addr_t dst_dev_addr)
2134{
2135	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2136	struct stedma40_half_channel_info *src_info = &cfg->src_info;
2137	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2138	unsigned long flags = 0;
2139	int ret;
2140
2141	if (desc->cyclic)
2142		flags |= LLI_CYCLIC | LLI_TERM_INT;
2143
2144	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2145				desc->lli_phy.src,
2146				virt_to_phys(desc->lli_phy.src),
2147				chan->src_def_cfg,
2148				src_info, dst_info, flags);
2149
2150	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2151				desc->lli_phy.dst,
2152				virt_to_phys(desc->lli_phy.dst),
2153				chan->dst_def_cfg,
2154				dst_info, src_info, flags);
2155
2156	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2157				   desc->lli_pool.size, DMA_TO_DEVICE);
2158
2159	return ret < 0 ? ret : 0;
2160}
2161
2162static struct d40_desc *
2163d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2164	      unsigned int sg_len, unsigned long dma_flags)
2165{
2166	struct stedma40_chan_cfg *cfg;
2167	struct d40_desc *desc;
2168	int ret;
2169
2170	desc = d40_desc_get(chan);
2171	if (!desc)
2172		return NULL;
2173
2174	cfg = &chan->dma_cfg;
2175	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2176					cfg->dst_info.data_width);
2177	if (desc->lli_len < 0) {
2178		chan_err(chan, "Unaligned size\n");
2179		goto free_desc;
2180	}
2181
2182	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2183	if (ret < 0) {
2184		chan_err(chan, "Could not allocate lli\n");
2185		goto free_desc;
2186	}
2187
2188	desc->lli_current = 0;
2189	desc->txd.flags = dma_flags;
2190	desc->txd.tx_submit = d40_tx_submit;
2191
2192	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2193
2194	return desc;
2195 free_desc:
2196	d40_desc_free(chan, desc);
2197	return NULL;
2198}
2199
2200static struct dma_async_tx_descriptor *
2201d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2202	    struct scatterlist *sg_dst, unsigned int sg_len,
2203	    enum dma_transfer_direction direction, unsigned long dma_flags)
2204{
2205	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2206	dma_addr_t src_dev_addr;
2207	dma_addr_t dst_dev_addr;
2208	struct d40_desc *desc;
2209	unsigned long flags;
2210	int ret;
2211
2212	if (!chan->phy_chan) {
2213		chan_err(chan, "Cannot prepare unallocated channel\n");
2214		return NULL;
2215	}
2216
2217	spin_lock_irqsave(&chan->lock, flags);
2218
2219	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2220	if (desc == NULL)
2221		goto unlock;
2222
2223	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2224		desc->cyclic = true;
2225
2226	src_dev_addr = 0;
2227	dst_dev_addr = 0;
2228	if (direction == DMA_DEV_TO_MEM)
2229		src_dev_addr = chan->runtime_addr;
2230	else if (direction == DMA_MEM_TO_DEV)
2231		dst_dev_addr = chan->runtime_addr;
2232
2233	if (chan_is_logical(chan))
2234		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2235				      sg_len, src_dev_addr, dst_dev_addr);
2236	else
2237		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2238				      sg_len, src_dev_addr, dst_dev_addr);
2239
2240	if (ret) {
2241		chan_err(chan, "Failed to prepare %s sg job: %d\n",
2242			 chan_is_logical(chan) ? "log" : "phy", ret);
2243		goto free_desc;
2244	}
2245
2246	/*
2247	 * Add the descriptor to the prepare queue so that it can be
2248	 * freed later in terminate_all.
2249	 */
2250	list_add_tail(&desc->node, &chan->prepare_queue);
2251
2252	spin_unlock_irqrestore(&chan->lock, flags);
2253
2254	return &desc->txd;
2255 free_desc:
2256	d40_desc_free(chan, desc);
2257 unlock:
2258	spin_unlock_irqrestore(&chan->lock, flags);
2259	return NULL;
2260}
2261
2262bool stedma40_filter(struct dma_chan *chan, void *data)
2263{
2264	struct stedma40_chan_cfg *info = data;
2265	struct d40_chan *d40c =
2266		container_of(chan, struct d40_chan, chan);
2267	int err;
2268
2269	if (data) {
2270		err = d40_validate_conf(d40c, info);
2271		if (!err)
2272			d40c->dma_cfg = *info;
2273	} else
2274		err = d40_config_memcpy(d40c);
2275
2276	if (!err)
2277		d40c->configured = true;
2278
2279	return err == 0;
2280}
2281EXPORT_SYMBOL(stedma40_filter);
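/*
 * Illustrative sketch of a platform-data based client (not part of the
 * driver; the dev_type value is hypothetical):
 */
#if 0
static struct dma_chan *example_request_rx_channel(void)
{
	struct stedma40_chan_cfg cfg = {
		.dir = DMA_DEV_TO_MEM,
		.dev_type = 14,		/* hypothetical event line */
		.mode = STEDMA40_MODE_LOGICAL,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* stedma40_filter() validates cfg and stores it in the channel */
	return dma_request_channel(mask, stedma40_filter, &cfg);
}
#endif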
2282
2283static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2284{
2285	bool realtime = d40c->dma_cfg.realtime;
2286	bool highprio = d40c->dma_cfg.high_priority;
2287	u32 rtreg;
2288	u32 event = D40_TYPE_TO_EVENT(dev_type);
2289	u32 group = D40_TYPE_TO_GROUP(dev_type);
2290	u32 bit = BIT(event);
2291	u32 prioreg;
2292	struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2293
2294	rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2295	/*
2296	 * Due to a hardware bug, in some cases a logical channel triggered by
2297	 * a high priority destination event line can generate extra packet
2298	 * transactions.
2299	 *
2300	 * The workaround is to not set the high priority level for the
2301	 * destination event lines that trigger logical channels.
2302	 */
2303	if (!src && chan_is_logical(d40c))
2304		highprio = false;
2305
2306	prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2307
2308	/* Destination event lines are stored in the upper halfword */
2309	if (!src)
2310		bit <<= 16;
2311
2312	writel(bit, d40c->base->virtbase + prioreg + group * 4);
2313	writel(bit, d40c->base->virtbase + rtreg + group * 4);
2314}
2315
2316static void d40_set_prio_realtime(struct d40_chan *d40c)
2317{
2318	if (d40c->base->rev < 3)
2319		return;
2320
2321	if ((d40c->dma_cfg.dir ==  DMA_DEV_TO_MEM) ||
2322	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2323		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2324
2325	if ((d40c->dma_cfg.dir ==  DMA_MEM_TO_DEV) ||
2326	    (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2327		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2328}
2329
2330#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
2331#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
2332#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2333#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2334#define D40_DT_FLAGS_HIGH_PRIO(flags)  ((flags >> 4) & 0x1)
2335
2336static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2337				  struct of_dma *ofdma)
2338{
2339	struct stedma40_chan_cfg cfg;
2340	dma_cap_mask_t cap;
2341	u32 flags;
2342
2343	memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2344
2345	dma_cap_zero(cap);
2346	dma_cap_set(DMA_SLAVE, cap);
2347
2348	cfg.dev_type = dma_spec->args[0];
2349	flags = dma_spec->args[2];
2350
2351	switch (D40_DT_FLAGS_MODE(flags)) {
2352	case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2353	case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2354	}
2355
2356	switch (D40_DT_FLAGS_DIR(flags)) {
2357	case 0:
2358		cfg.dir = DMA_MEM_TO_DEV;
2359		cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2360		break;
2361	case 1:
2362		cfg.dir = DMA_DEV_TO_MEM;
2363		cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2364		break;
2365	}
2366
2367	if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2368		cfg.phy_channel = dma_spec->args[1];
2369		cfg.use_fixed_channel = true;
2370	}
2371
2372	if (D40_DT_FLAGS_HIGH_PRIO(flags))
2373		cfg.high_priority = true;
2374
2375	return dma_request_channel(cap, stedma40_filter, &cfg);
2376}
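/*
 * Illustrative device tree usage (values are hypothetical): a client
 * requesting event line 28 as a logical device-to-memory channel sets
 * only the direction flag (bit 1), giving a flags cell of 0x2:
 *
 *	dmas = <&dma 28 0 0x2>;
 *
 * args[0] is the dev_type, args[1] the physical channel (honoured only
 * when the fixed-channel flag, bit 3, is set) and args[2] the flags
 * decoded by the D40_DT_FLAGS_* macros above.
 */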
2377
2378/* DMA ENGINE functions */
2379static int d40_alloc_chan_resources(struct dma_chan *chan)
2380{
2381	int err;
2382	unsigned long flags;
2383	struct d40_chan *d40c =
2384		container_of(chan, struct d40_chan, chan);
2385	bool is_free_phy;
2386	spin_lock_irqsave(&d40c->lock, flags);
2387
2388	dma_cookie_init(chan);
2389
2390	/* If no dma configuration is set use default configuration (memcpy) */
2391	if (!d40c->configured) {
2392		err = d40_config_memcpy(d40c);
2393		if (err) {
2394			chan_err(d40c, "Failed to configure memcpy channel\n");
2395			goto mark_last_busy;
2396		}
2397	}
2398
2399	err = d40_allocate_channel(d40c, &is_free_phy);
2400	if (err) {
2401		chan_err(d40c, "Failed to allocate channel\n");
2402		d40c->configured = false;
2403		goto mark_last_busy;
2404	}
2405
2406	pm_runtime_get_sync(d40c->base->dev);
2407
2408	d40_set_prio_realtime(d40c);
2409
2410	if (chan_is_logical(d40c)) {
2411		if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2412			d40c->lcpa = d40c->base->lcpa_base +
2413				d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2414		else
2415			d40c->lcpa = d40c->base->lcpa_base +
2416				d40c->dma_cfg.dev_type *
2417				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2418
2419		/* Unmask the Global Interrupt Mask. */
2420		d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2421		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2422	}
2423
2424	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2425		 chan_is_logical(d40c) ? "logical" : "physical",
2426		 d40c->phy_chan->num,
2427		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2428
2429
2430	/*
2431	 * Only write channel configuration to the DMA if the physical
2432	 * resource is free. In case of multiple logical channels
2433	 * on the same physical resource, only the first write is necessary.
2434	 */
2435	if (is_free_phy)
2436		d40_config_write(d40c);
2437 mark_last_busy:
2438	pm_runtime_mark_last_busy(d40c->base->dev);
2439	pm_runtime_put_autosuspend(d40c->base->dev);
2440	spin_unlock_irqrestore(&d40c->lock, flags);
2441	return err;
2442}
2443
2444static void d40_free_chan_resources(struct dma_chan *chan)
2445{
2446	struct d40_chan *d40c =
2447		container_of(chan, struct d40_chan, chan);
2448	int err;
2449	unsigned long flags;
2450
2451	if (d40c->phy_chan == NULL) {
2452		chan_err(d40c, "Cannot free unallocated channel\n");
2453		return;
2454	}
2455
2456	spin_lock_irqsave(&d40c->lock, flags);
2457
2458	err = d40_free_dma(d40c);
2459
2460	if (err)
2461		chan_err(d40c, "Failed to free channel\n");
2462	spin_unlock_irqrestore(&d40c->lock, flags);
2463}
2464
2465static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2466						       dma_addr_t dst,
2467						       dma_addr_t src,
2468						       size_t size,
2469						       unsigned long dma_flags)
2470{
2471	struct scatterlist dst_sg;
2472	struct scatterlist src_sg;
2473
2474	sg_init_table(&dst_sg, 1);
2475	sg_init_table(&src_sg, 1);
2476
2477	sg_dma_address(&dst_sg) = dst;
2478	sg_dma_address(&src_sg) = src;
2479
2480	sg_dma_len(&dst_sg) = size;
2481	sg_dma_len(&src_sg) = size;
2482
2483	return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
2484			   DMA_MEM_TO_MEM, dma_flags);
2485}
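/*
 * Illustrative sketch of a memcpy client (not part of the driver; buffer
 * handles are assumed to be DMA-mapped already):
 */
#if 0
static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	/* Reaches d40_prep_memcpy() through the dmaengine core */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
#endif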
2486
2487static struct dma_async_tx_descriptor *
2488d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2489		  unsigned int sg_len, enum dma_transfer_direction direction,
2490		  unsigned long dma_flags, void *context)
2491{
2492	if (!is_slave_direction(direction))
2493		return NULL;
2494
2495	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2496}
2497
2498static struct dma_async_tx_descriptor *
2499dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2500		     size_t buf_len, size_t period_len,
2501		     enum dma_transfer_direction direction, unsigned long flags)
2502{
2503	unsigned int periods = buf_len / period_len;
2504	struct dma_async_tx_descriptor *txd;
2505	struct scatterlist *sg;
2506	int i;
2507
2508	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2509	if (!sg)
2510		return NULL;
2511
2512	for (i = 0; i < periods; i++) {
2513		sg_dma_address(&sg[i]) = dma_addr;
2514		sg_dma_len(&sg[i]) = period_len;
2515		dma_addr += period_len;
2516	}
2517
2518	sg_chain(sg, periods + 1, sg);
2519
2520	txd = d40_prep_sg(chan, sg, sg, periods, direction,
2521			  DMA_PREP_INTERRUPT);
2522
2523	kfree(sg);
2524
2525	return txd;
2526}
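/*
 * Illustrative sketch of a cyclic (e.g. audio) client (not part of the
 * driver; names and sizes are hypothetical).  The callback then fires
 * once per period, since every link is prepared with LLI_TERM_INT:
 */
#if 0
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EBUSY;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
#endif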
2527
2528static enum dma_status d40_tx_status(struct dma_chan *chan,
2529				     dma_cookie_t cookie,
2530				     struct dma_tx_state *txstate)
2531{
2532	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2533	enum dma_status ret;
2534
2535	if (d40c->phy_chan == NULL) {
2536		chan_err(d40c, "Cannot read status of unallocated channel\n");
2537		return -EINVAL;
2538	}
2539
2540	ret = dma_cookie_status(chan, cookie, txstate);
2541	if (ret != DMA_COMPLETE && txstate)
2542		dma_set_residue(txstate, stedma40_residue(chan));
2543
2544	if (d40_is_paused(d40c))
2545		ret = DMA_PAUSED;
2546
2547	return ret;
2548}
2549
2550static void d40_issue_pending(struct dma_chan *chan)
2551{
2552	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2553	unsigned long flags;
2554
2555	if (d40c->phy_chan == NULL) {
2556		chan_err(d40c, "Channel is not allocated!\n");
2557		return;
2558	}
2559
2560	spin_lock_irqsave(&d40c->lock, flags);
2561
2562	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2563
2564	/* Busy means that queued jobs are already being processed */
2565	if (!d40c->busy)
2566		(void) d40_queue_start(d40c);
2567
2568	spin_unlock_irqrestore(&d40c->lock, flags);
2569}
2570
2571static int d40_terminate_all(struct dma_chan *chan)
2572{
2573	unsigned long flags;
2574	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2575	int ret;
2576
2577	if (d40c->phy_chan == NULL) {
2578		chan_err(d40c, "Channel is not allocated!\n");
2579		return -EINVAL;
2580	}
2581
2582	spin_lock_irqsave(&d40c->lock, flags);
2583
2584	pm_runtime_get_sync(d40c->base->dev);
2585	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2586	if (ret)
2587		chan_err(d40c, "Failed to stop channel\n");
2588
2589	d40_term_all(d40c);
2590	pm_runtime_mark_last_busy(d40c->base->dev);
2591	pm_runtime_put_autosuspend(d40c->base->dev);
2592	if (d40c->busy) {
2593		pm_runtime_mark_last_busy(d40c->base->dev);
2594		pm_runtime_put_autosuspend(d40c->base->dev);
2595	}
2596	d40c->busy = false;
2597
2598	spin_unlock_irqrestore(&d40c->lock, flags);
2599	return 0;
2600}
2601
2602static int
2603dma40_config_to_halfchannel(struct d40_chan *d40c,
2604			    struct stedma40_half_channel_info *info,
2605			    u32 maxburst)
2606{
2607	int psize;
2608
2609	if (chan_is_logical(d40c)) {
2610		if (maxburst >= 16)
2611			psize = STEDMA40_PSIZE_LOG_16;
2612		else if (maxburst >= 8)
2613			psize = STEDMA40_PSIZE_LOG_8;
2614		else if (maxburst >= 4)
2615			psize = STEDMA40_PSIZE_LOG_4;
2616		else
2617			psize = STEDMA40_PSIZE_LOG_1;
2618	} else {
2619		if (maxburst >= 16)
2620			psize = STEDMA40_PSIZE_PHY_16;
2621		else if (maxburst >= 8)
2622			psize = STEDMA40_PSIZE_PHY_8;
2623		else if (maxburst >= 4)
2624			psize = STEDMA40_PSIZE_PHY_4;
2625		else
2626			psize = STEDMA40_PSIZE_PHY_1;
2627	}
2628
2629	info->psize = psize;
2630	info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2631
2632	return 0;
2633}
2634
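/*
 * The mapping above rounds maxburst down to the nearest supported packet
 * size. Worked out for the logical-channel branch:
 *
 *	maxburst  1..3  -> STEDMA40_PSIZE_LOG_1
 *	maxburst  4..7  -> STEDMA40_PSIZE_LOG_4
 *	maxburst  8..15 -> STEDMA40_PSIZE_LOG_8
 *	maxburst >= 16  -> STEDMA40_PSIZE_LOG_16
 *
 * The physical-channel branch is identical with the _PHY_ constants.
 */
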
2635/* Runtime reconfiguration extension */
2636static int d40_set_runtime_config(struct dma_chan *chan,
2637				  struct dma_slave_config *config)
2638{
2639	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2640	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2641	enum dma_slave_buswidth src_addr_width, dst_addr_width;
2642	dma_addr_t config_addr;
2643	u32 src_maxburst, dst_maxburst;
2644	int ret;
2645
2646	if (d40c->phy_chan == NULL) {
2647		chan_err(d40c, "Channel is not allocated!\n");
2648		return -EINVAL;
2649	}
2650
2651	src_addr_width = config->src_addr_width;
2652	src_maxburst = config->src_maxburst;
2653	dst_addr_width = config->dst_addr_width;
2654	dst_maxburst = config->dst_maxburst;
2655
2656	if (config->direction == DMA_DEV_TO_MEM) {
2657		config_addr = config->src_addr;
2658
2659		if (cfg->dir != DMA_DEV_TO_MEM)
2660			dev_dbg(d40c->base->dev,
2661				"channel was not configured for peripheral "
2662				"to memory transfer (%d) overriding\n",
2663				cfg->dir);
2664		cfg->dir = DMA_DEV_TO_MEM;
2665
2666		/* Configure the memory side */
2667		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2668			dst_addr_width = src_addr_width;
2669		if (dst_maxburst == 0)
2670			dst_maxburst = src_maxburst;
2671
2672	} else if (config->direction == DMA_MEM_TO_DEV) {
2673		config_addr = config->dst_addr;
2674
2675		if (cfg->dir != DMA_MEM_TO_DEV)
2676			dev_dbg(d40c->base->dev,
2677				"channel was not configured for memory "
2678				"to peripheral transfer (%d) overriding\n",
2679				cfg->dir);
2680		cfg->dir = DMA_MEM_TO_DEV;
2681
2682		/* Configure the memory side */
2683		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2684			src_addr_width = dst_addr_width;
2685		if (src_maxburst == 0)
2686			src_maxburst = dst_maxburst;
2687	} else {
2688		dev_err(d40c->base->dev,
2689			"unrecognized channel direction %d\n",
2690			config->direction);
2691		return -EINVAL;
2692	}
2693
2694	if (config_addr == 0) {
2695		dev_err(d40c->base->dev, "no address supplied\n");
2696		return -EINVAL;
2697	}
2698
2699	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2700		dev_err(d40c->base->dev,
2701			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2702			src_maxburst,
2703			src_addr_width,
2704			dst_maxburst,
2705			dst_addr_width);
2706		return -EINVAL;
2707	}
2708
2709	if (src_maxburst > 16) {
2710		src_maxburst = 16;
2711		dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2712	} else if (dst_maxburst > 16) {
2713		dst_maxburst = 16;
2714		src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2715	}
2716
2717	/* Only valid widths are: 1, 2, 4 and 8. */
2718	if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2719	    src_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2720	    dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2721	    dst_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
2722	    !is_power_of_2(src_addr_width) ||
2723	    !is_power_of_2(dst_addr_width))
2724		return -EINVAL;
2725
2726	cfg->src_info.data_width = src_addr_width;
2727	cfg->dst_info.data_width = dst_addr_width;
2728
2729	ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2730					  src_maxburst);
2731	if (ret)
2732		return ret;
2733
2734	ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2735					  dst_maxburst);
2736	if (ret)
2737		return ret;
2738
2739	/* Fill in register values */
2740	if (chan_is_logical(d40c))
2741		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2742	else
2743		d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2744
2745	/* These settings will take precedence later */
2746	d40c->runtime_addr = config_addr;
2747	d40c->runtime_direction = config->direction;
2748	dev_dbg(d40c->base->dev,
2749		"configured channel %s for %s, data width %d/%d, "
2750		"maxburst %d/%d elements, LE, no flow control\n",
2751		dma_chan_name(chan),
2752		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2753		src_addr_width, dst_addr_width,
2754		src_maxburst, dst_maxburst);
2755
2756	return 0;
2757}
2758
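/*
 * Illustrative sketch, not part of the driver: a client filling in a
 * dma_slave_config before a memory-to-device transfer. The FIFO address,
 * width and burst values are hypothetical; dmaengine_slave_config() routes
 * this to d40_set_runtime_config() via the device_config hook.
 */
static int d40_example_config(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	/* The undefined src side will be derived from the dst side above */
	return dmaengine_slave_config(chan, &cfg);
}
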
2759/* Initialization functions */
2760
2761static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2762				 struct d40_chan *chans, int offset,
2763				 int num_chans)
2764{
2765	int i = 0;
2766	struct d40_chan *d40c;
2767
2768	INIT_LIST_HEAD(&dma->channels);
2769
2770	for (i = offset; i < offset + num_chans; i++) {
2771		d40c = &chans[i];
2772		d40c->base = base;
2773		d40c->chan.device = dma;
2774
2775		spin_lock_init(&d40c->lock);
2776
2777		d40c->log_num = D40_PHY_CHAN;
2778
2779		INIT_LIST_HEAD(&d40c->done);
2780		INIT_LIST_HEAD(&d40c->active);
2781		INIT_LIST_HEAD(&d40c->queue);
2782		INIT_LIST_HEAD(&d40c->pending_queue);
2783		INIT_LIST_HEAD(&d40c->client);
2784		INIT_LIST_HEAD(&d40c->prepare_queue);
2785
2786		tasklet_init(&d40c->tasklet, dma_tasklet,
2787			     (unsigned long) d40c);
2788
2789		list_add_tail(&d40c->chan.device_node,
2790			      &dma->channels);
2791	}
2792}
2793
2794static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2795{
2796	if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
2797		dev->device_prep_slave_sg = d40_prep_slave_sg;
2798		dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2799	}
2800
2801	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2802		dev->device_prep_dma_memcpy = d40_prep_memcpy;
2803		dev->directions = BIT(DMA_MEM_TO_MEM);
2804		/*
2805		 * This controller can only access addresses at even
2806		 * 32-bit boundaries, i.e. multiples of 4 bytes (2^2)
2807		 */
2808		dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
2809	}
2810
2811	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2812		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2813
2814	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2815	dev->device_free_chan_resources = d40_free_chan_resources;
2816	dev->device_issue_pending = d40_issue_pending;
2817	dev->device_tx_status = d40_tx_status;
2818	dev->device_config = d40_set_runtime_config;
2819	dev->device_pause = d40_pause;
2820	dev->device_resume = d40_resume;
2821	dev->device_terminate_all = d40_terminate_all;
2822	dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2823	dev->dev = base->dev;
2824}
2825
2826static int __init d40_dmaengine_init(struct d40_base *base,
2827				     int num_reserved_chans)
2828{
2829	int err;
2830
2831	d40_chan_init(base, &base->dma_slave, base->log_chans,
2832		      0, base->num_log_chans);
2833
2834	dma_cap_zero(base->dma_slave.cap_mask);
2835	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2836	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2837
2838	d40_ops_init(base, &base->dma_slave);
2839
2840	err = dma_async_device_register(&base->dma_slave);
2841
2842	if (err) {
2843		d40_err(base->dev, "Failed to register slave channels\n");
2844		goto exit;
2845	}
2846
2847	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2848		      base->num_log_chans, base->num_memcpy_chans);
2849
2850	dma_cap_zero(base->dma_memcpy.cap_mask);
2851	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2852
2853	d40_ops_init(base, &base->dma_memcpy);
2854
2855	err = dma_async_device_register(&base->dma_memcpy);
2856
2857	if (err) {
2858		d40_err(base->dev,
2859			"Failed to register memcpy only channels\n");
2860		goto unregister_slave;
2861	}
2862
2863	d40_chan_init(base, &base->dma_both, base->phy_chans,
2864		      0, num_reserved_chans);
2865
2866	dma_cap_zero(base->dma_both.cap_mask);
2867	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2868	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2869	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
2870
2871	d40_ops_init(base, &base->dma_both);
2872	err = dma_async_device_register(&base->dma_both);
2873
2874	if (err) {
2875		d40_err(base->dev,
2876			"Failed to register logical and physical capable channels\n");
2877		goto unregister_memcpy;
2878	}
2879	return 0;
2880 unregister_memcpy:
2881	dma_async_device_unregister(&base->dma_memcpy);
2882 unregister_slave:
2883	dma_async_device_unregister(&base->dma_slave);
2884 exit:
2885	return err;
2886}
2887
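/*
 * Net effect of the registration above: three dma_device instances are
 * carved out of the same channel arrays. "dma_slave" exposes the logical
 * channels for slave and cyclic work, "dma_memcpy" exposes the logical
 * channels set aside for memcpy, and "dma_both" exposes the physical
 * channels with both capability sets.
 */
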
2888/* Suspend/resume functionality */
2889#ifdef CONFIG_PM_SLEEP
2890static int dma40_suspend(struct device *dev)
2891{
2892	struct platform_device *pdev = to_platform_device(dev);
2893	struct d40_base *base = platform_get_drvdata(pdev);
2894	int ret;
2895
2896	ret = pm_runtime_force_suspend(dev);
2897	if (ret)
2898		return ret;
2899
2900	if (base->lcpa_regulator)
2901		ret = regulator_disable(base->lcpa_regulator);
2902	return ret;
2903}
2904
2905static int dma40_resume(struct device *dev)
2906{
2907	struct platform_device *pdev = to_platform_device(dev);
2908	struct d40_base *base = platform_get_drvdata(pdev);
2909	int ret = 0;
2910
2911	if (base->lcpa_regulator) {
2912		ret = regulator_enable(base->lcpa_regulator);
2913		if (ret)
2914			return ret;
2915	}
2916
2917	return pm_runtime_force_resume(dev);
2918}
2919#endif
2920
2921#ifdef CONFIG_PM
2922static void dma40_backup(void __iomem *baseaddr, u32 *backup,
2923			 u32 *regaddr, int num, bool save)
2924{
2925	int i;
2926
2927	for (i = 0; i < num; i++) {
2928		void __iomem *addr = baseaddr + regaddr[i];
2929
2930		if (save)
2931			backup[i] = readl_relaxed(addr);
2932		else
2933			writel_relaxed(backup[i], addr);
2934	}
2935}
2936
2937static void d40_save_restore_registers(struct d40_base *base, bool save)
2938{
2939	int i;
2940
2941	/* Save/Restore channel specific registers */
2942	for (i = 0; i < base->num_phy_chans; i++) {
2943		void __iomem *addr;
2944		int idx;
2945
2946		if (base->phy_res[i].reserved)
2947			continue;
2948
2949		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
2950		idx = i * ARRAY_SIZE(d40_backup_regs_chan);
2951
2952		dma40_backup(addr, &base->reg_val_backup_chan[idx],
2953			     d40_backup_regs_chan,
2954			     ARRAY_SIZE(d40_backup_regs_chan),
2955			     save);
2956	}
2957
2958	/* Save/Restore global registers */
2959	dma40_backup(base->virtbase, base->reg_val_backup,
2960		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
2961		     save);
2962
2963	/* Save/Restore registers that only exist on dma40 v3 and later */
2964	if (base->gen_dmac.backup)
2965		dma40_backup(base->virtbase, base->reg_val_backup_v4,
2966			     base->gen_dmac.backup,
2967			base->gen_dmac.backup_size,
2968			save);
2969}
2970
2971static int dma40_runtime_suspend(struct device *dev)
2972{
2973	struct platform_device *pdev = to_platform_device(dev);
2974	struct d40_base *base = platform_get_drvdata(pdev);
2975
2976	d40_save_restore_registers(base, true);
2977
2978	/* Don't disable/enable clocks for v1 due to HW bugs */
2979	if (base->rev != 1)
2980		writel_relaxed(base->gcc_pwr_off_mask,
2981			       base->virtbase + D40_DREG_GCC);
2982
2983	return 0;
2984}
2985
2986static int dma40_runtime_resume(struct device *dev)
2987{
2988	struct platform_device *pdev = to_platform_device(dev);
2989	struct d40_base *base = platform_get_drvdata(pdev);
2990
2991	d40_save_restore_registers(base, false);
2992
2993	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
2994		       base->virtbase + D40_DREG_GCC);
2995	return 0;
2996}
2997#endif
2998
2999static const struct dev_pm_ops dma40_pm_ops = {
3000	SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
3001	SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
3002				dma40_runtime_resume,
3003				NULL)
3004};
3005
3006/* Initialization functions. */
3007
3008static int __init d40_phy_res_init(struct d40_base *base)
3009{
3010	int i;
3011	int num_phy_chans_avail = 0;
3012	u32 val[2];
3013	int odd_even_bit = -2;
3014	int gcc = D40_DREG_GCC_ENA;
3015
3016	val[0] = readl(base->virtbase + D40_DREG_PRSME);
3017	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3018
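	/*
	 * Note on the decode below: each physical channel has a 2-bit
	 * security field, with even-numbered channels packed into PRSME and
	 * odd ones into PRSMO. Channel 0 thus sits in PRSME bits 1:0,
	 * channel 1 in PRSMO bits 1:0, channel 2 in PRSME bits 3:2, and so
	 * on; a field value of 1 marks the channel as secure-only.
	 */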
3019	for (i = 0; i < base->num_phy_chans; i++) {
3020		base->phy_res[i].num = i;
3021		odd_even_bit += 2 * ((i % 2) == 0);
3022		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3023			/* Mark security only channels as occupied */
3024			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3025			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3026			base->phy_res[i].reserved = true;
3027			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3028						       D40_DREG_GCC_SRC);
3029			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3030						       D40_DREG_GCC_DST);
3031
3032
3033		} else {
3034			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3035			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3036			base->phy_res[i].reserved = false;
3037			num_phy_chans_avail++;
3038		}
3039		spin_lock_init(&base->phy_res[i].lock);
3040	}
3041
3042	/* Mark disabled channels as occupied */
3043	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3044		int chan = base->plat_data->disabled_channels[i];
3045
3046		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3047		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3048		base->phy_res[chan].reserved = true;
3049		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3050					       D40_DREG_GCC_SRC);
3051		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3052					       D40_DREG_GCC_DST);
3053		num_phy_chans_avail--;
3054	}
3055
3056	/* Mark soft_lli channels */
3057	for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3058		int chan = base->plat_data->soft_lli_chans[i];
3059
3060		base->phy_res[chan].use_soft_lli = true;
3061	}
3062
3063	dev_info(base->dev, "%d of %d physical DMA channels available\n",
3064		 num_phy_chans_avail, base->num_phy_chans);
3065
3066	/* Verify settings extended vs standard */
3067	val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3068
3069	for (i = 0; i < base->num_phy_chans; i++) {
3070
3071		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3072		    (val[0] & 0x3) != 1)
3073			dev_info(base->dev,
3074				 "[%s] INFO: channel %d is misconfigured (%d)\n",
3075				 __func__, i, val[0] & 0x3);
3076
3077		val[0] = val[0] >> 2;
3078	}
3079
3080	/*
3081	 * To keep things simple, enable all clocks initially.
3082	 * The clocks will get managed later, post channel allocation.
3083	 * The clocks for the event lines on which reserved channels exist
3084	 * are not managed here.
3085	 */
3086	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3087	base->gcc_pwr_off_mask = gcc;
3088
3089	return num_phy_chans_avail;
3090}
3091
3092static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3093{
3094	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3095	struct clk *clk;
3096	void __iomem *virtbase;
3097	struct resource *res;
3098	struct d40_base *base;
3099	int num_log_chans;
3100	int num_phy_chans;
3101	int num_memcpy_chans;
3102	int clk_ret = -EINVAL;
3103	int i;
3104	u32 pid;
3105	u32 cid;
3106	u8 rev;
3107
3108	clk = clk_get(&pdev->dev, NULL);
3109	if (IS_ERR(clk)) {
3110		d40_err(&pdev->dev, "No matching clock found\n");
3111		goto check_prepare_enabled;
3112	}
3113
3114	clk_ret = clk_prepare_enable(clk);
3115	if (clk_ret) {
3116		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3117		goto disable_unprepare;
3118	}
3119
3120	/* Get IO for DMAC base address */
3121	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3122	if (!res)
3123		goto disable_unprepare;
3124
3125	if (request_mem_region(res->start, resource_size(res),
3126			       D40_NAME " I/O base") == NULL)
3127		goto disable_unprepare;
3128
3129	virtbase = ioremap(res->start, resource_size(res));
3130	if (!virtbase)
3131		goto release_region;
3132
3133	/* This is just a regular AMBA PrimeCell ID actually */
3134	for (pid = 0, i = 0; i < 4; i++)
3135		pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3136			& 255) << (i * 8);
3137	for (cid = 0, i = 0; i < 4; i++)
3138		cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3139			& 255) << (i * 8);
3140
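	/*
	 * Worked example of the assembly above: each PIDx/CIDx register
	 * holds one byte in its low 8 bits, PID0..3 at offset -0x20 and
	 * CID0..3 at offset -0x10 from the end of the region. Reading
	 * CID0..CID3 as 0x0d, 0xf0, 0x05, 0xb1 and packing them
	 * little-endian gives 0xb105f00d, the fixed AMBA_CID value.
	 */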
3141	if (cid != AMBA_CID) {
3142		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3143		goto unmap_io;
3144	}
3145	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3146		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3147			AMBA_MANF_BITS(pid),
3148			AMBA_VENDOR_ST);
3149		goto unmap_io;
3150	}
3151	/*
3152	 * HW revision:
3153	 * DB8500ed has revision 0
3154	 * ? has revision 1
3155	 * DB8500v1 has revision 2
3156	 * DB8500v2 has revision 3
3157	 * AP9540v1 has revision 4
3158	 * DB8540v1 has revision 4
3159	 */
3160	rev = AMBA_REV_BITS(pid);
3161	if (rev < 2) {
3162		d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
3163		goto unmap_io;
3164	}
3165
3166	/* The number of physical channels on this HW */
3167	if (plat_data->num_of_phy_chans)
3168		num_phy_chans = plat_data->num_of_phy_chans;
3169	else
3170		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3171
3172	/* The number of channels used for memcpy */
3173	if (plat_data->num_of_memcpy_chans)
3174		num_memcpy_chans = plat_data->num_of_memcpy_chans;
3175	else
3176		num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3177
3178	num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3179
3180	dev_info(&pdev->dev,
3181		 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3182		 rev, &res->start, num_phy_chans, num_log_chans);
3183
3184	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3185		       (num_phy_chans + num_log_chans + num_memcpy_chans) *
3186		       sizeof(struct d40_chan), GFP_KERNEL);
3187
3188	if (base == NULL)
3189		goto unmap_io;
3190
3191	base->rev = rev;
3192	base->clk = clk;
3193	base->num_memcpy_chans = num_memcpy_chans;
3194	base->num_phy_chans = num_phy_chans;
3195	base->num_log_chans = num_log_chans;
3196	base->phy_start = res->start;
3197	base->phy_size = resource_size(res);
3198	base->virtbase = virtbase;
3199	base->plat_data = plat_data;
3200	base->dev = &pdev->dev;
3201	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3202	base->log_chans = &base->phy_chans[num_phy_chans];
3203
3204	if (base->plat_data->num_of_phy_chans == 14) {
3205		base->gen_dmac.backup = d40_backup_regs_v4b;
3206		base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3207		base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3208		base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3209		base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3210		base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3211		base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3212		base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3213		base->gen_dmac.il = il_v4b;
3214		base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3215		base->gen_dmac.init_reg = dma_init_reg_v4b;
3216		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3217	} else {
3218		if (base->rev >= 3) {
3219			base->gen_dmac.backup = d40_backup_regs_v4a;
3220			base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3221		}
3222		base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3223		base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3224		base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3225		base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3226		base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3227		base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3228		base->gen_dmac.il = il_v4a;
3229		base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3230		base->gen_dmac.init_reg = dma_init_reg_v4a;
3231		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3232	}
3233
3234	base->phy_res = kcalloc(num_phy_chans,
3235				sizeof(*base->phy_res),
3236				GFP_KERNEL);
3237	if (!base->phy_res)
3238		goto free_base;
3239
3240	base->lookup_phy_chans = kcalloc(num_phy_chans,
3241					 sizeof(*base->lookup_phy_chans),
3242					 GFP_KERNEL);
3243	if (!base->lookup_phy_chans)
3244		goto free_phy_res;
3245
3246	base->lookup_log_chans = kcalloc(num_log_chans,
3247					 sizeof(*base->lookup_log_chans),
3248					 GFP_KERNEL);
3249	if (!base->lookup_log_chans)
3250		goto free_phy_chans;
3251
3252	base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
3253						  sizeof(d40_backup_regs_chan),
3254						  GFP_KERNEL);
3255	if (!base->reg_val_backup_chan)
3256		goto free_log_chans;
3257
3258	base->lcla_pool.alloc_map = kcalloc(num_phy_chans
3259					    * D40_LCLA_LINK_PER_EVENT_GRP,
3260					    sizeof(*base->lcla_pool.alloc_map),
3261					    GFP_KERNEL);
3262	if (!base->lcla_pool.alloc_map)
3263		goto free_backup_chan;
3264
3265	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3266					    0, SLAB_HWCACHE_ALIGN,
3267					    NULL);
3268	if (base->desc_slab == NULL)
3269		goto free_map;
3270
3271	return base;
3272 free_map:
3273	kfree(base->lcla_pool.alloc_map);
3274 free_backup_chan:
3275	kfree(base->reg_val_backup_chan);
3276 free_log_chans:
3277	kfree(base->lookup_log_chans);
3278 free_phy_chans:
3279	kfree(base->lookup_phy_chans);
3280 free_phy_res:
3281	kfree(base->phy_res);
3282 free_base:
3283	kfree(base);
3284 unmap_io:
3285	iounmap(virtbase);
3286 release_region:
3287	release_mem_region(res->start, resource_size(res));
3288 check_prepare_enabled:
3289	if (!clk_ret)
3290 disable_unprepare:
3291		clk_disable_unprepare(clk);
3292	if (!IS_ERR(clk))
3293		clk_put(clk);
3294	return NULL;
3295}
3296
3297static void __init d40_hw_init(struct d40_base *base)
3298{
3299
3300	int i;
3301	u32 prmseo[2] = {0, 0};
3302	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3303	u32 pcmis = 0;
3304	u32 pcicr = 0;
3305	struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3306	u32 reg_size = base->gen_dmac.init_reg_size;
3307
3308	for (i = 0; i < reg_size; i++)
3309		writel(dma_init_reg[i].val,
3310		       base->virtbase + dma_init_reg[i].reg);
3311
3312	/* Configure all our dma channels to default settings */
3313	for (i = 0; i < base->num_phy_chans; i++) {
3314
3315		activeo[i % 2] = activeo[i % 2] << 2;
3316
3317		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3318		    == D40_ALLOC_PHY) {
3319			activeo[i % 2] |= 3;
3320			continue;
3321		}
3322
3323		/* Enable interrupt # */
3324		pcmis = (pcmis << 1) | 1;
3325
3326		/* Clear interrupt # */
3327		pcicr = (pcicr << 1) | 1;
3328
3329		/* Set channel to physical mode */
3330		prmseo[i % 2] = prmseo[i % 2] << 2;
3331		prmseo[i % 2] |= 1;
3332
3333	}
3334
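	/*
	 * The loop above walks channels highest-numbered first, shifting
	 * two bits per channel: index [1] ends up holding the even-numbered
	 * channels (channel 0 in bits 1:0) and is written to the "even"
	 * PRMSE/ACTIVE registers below, while index [0] collects the odd
	 * channels for PRMSO/ACTIVO.
	 */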
3335	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3336	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3337	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3338	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3339
3340	/* Write which interrupt to enable */
3341	writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3342
3343	/* Write which interrupt to clear */
3344	writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3345
3346	/* These are __initdata and cannot be accessed after init */
3347	base->gen_dmac.init_reg = NULL;
3348	base->gen_dmac.init_reg_size = 0;
3349}
3350
3351static int __init d40_lcla_allocate(struct d40_base *base)
3352{
3353	struct d40_lcla_pool *pool = &base->lcla_pool;
3354	unsigned long *page_list;
3355	int i, j;
3356	int ret;
3357
3358	/*
3359	 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
3360	 * To fulfill this hardware requirement without wasting 256 KB,
3361	 * we allocate pages until we get an aligned one.
3362	 */
3363	page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
3364				  sizeof(*page_list),
3365				  GFP_KERNEL);
3366	if (!page_list)
3367		return -ENOMEM;
3368
3369	/* Calculate how many pages are required */
3370	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3371
3372	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3373		page_list[i] = __get_free_pages(GFP_KERNEL,
3374						base->lcla_pool.pages);
3375		if (!page_list[i]) {
3376
3377			d40_err(base->dev, "Failed to allocate %d pages.\n",
3378				base->lcla_pool.pages);
3379			ret = -ENOMEM;
3380
3381			for (j = 0; j < i; j++)
3382				free_pages(page_list[j], base->lcla_pool.pages);
3383			goto free_page_list;
3384		}
3385
3386		if ((virt_to_phys((void *)page_list[i]) &
3387		     (LCLA_ALIGNMENT - 1)) == 0)
3388			break;
3389	}
3390
3391	for (j = 0; j < i; j++)
3392		free_pages(page_list[j], base->lcla_pool.pages);
3393
3394	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3395		base->lcla_pool.base = (void *)page_list[i];
3396	} else {
3397		/*
3398		 * After many attempts with no success finding the correct
3399		 * alignment, fall back to allocating a big buffer.
3400		 */
3401		dev_warn(base->dev,
3402			 "[%s] Failed to get %d pages @ 18 bit align.\n",
3403			 __func__, base->lcla_pool.pages);
3404		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3405							 base->num_phy_chans +
3406							 LCLA_ALIGNMENT,
3407							 GFP_KERNEL);
3408		if (!base->lcla_pool.base_unaligned) {
3409			ret = -ENOMEM;
3410			goto free_page_list;
3411		}
3412
3413		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3414						 LCLA_ALIGNMENT);
3415	}
3416
3417	pool->dma_addr = dma_map_single(base->dev, pool->base,
3418					SZ_1K * base->num_phy_chans,
3419					DMA_TO_DEVICE);
3420	if (dma_mapping_error(base->dev, pool->dma_addr)) {
3421		pool->dma_addr = 0;
3422		ret = -ENOMEM;
3423		goto free_page_list;
3424	}
3425
3426	writel(virt_to_phys(base->lcla_pool.base),
3427	       base->virtbase + D40_DREG_LCLA);
3428	ret = 0;
3429 free_page_list:
3430	kfree(page_list);
3431	return ret;
3432}
3433
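/*
 * Sizing note for the allocation above, assuming 4 KiB pages and e.g. 8
 * physical channels: 1 KiB of LCLA space per channel gives 8 KiB, so
 * lcla_pool.pages becomes 2. That value is handed to __get_free_pages()
 * as the allocation order, and the loop simply retries until one
 * allocation happens to land on the 256 KiB (LCLA_ALIGNMENT) boundary
 * required by the hardware.
 */
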
3434static int __init d40_of_probe(struct platform_device *pdev,
3435			       struct device_node *np)
3436{
3437	struct stedma40_platform_data *pdata;
3438	int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3439	const __be32 *list;
3440
3441	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
3442	if (!pdata)
3443		return -ENOMEM;
3444
3445	/* If absent this value will be obtained from h/w. */
3446	of_property_read_u32(np, "dma-channels", &num_phy);
3447	if (num_phy > 0)
3448		pdata->num_of_phy_chans = num_phy;
3449
3450	list = of_get_property(np, "memcpy-channels", &num_memcpy);
3451	num_memcpy /= sizeof(*list);
3452
3453	if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3454		d40_err(&pdev->dev,
3455			"Invalid number of memcpy channels specified (%d)\n",
3456			num_memcpy);
3457		return -EINVAL;
3458	}
3459	pdata->num_of_memcpy_chans = num_memcpy;
3460
3461	of_property_read_u32_array(np, "memcpy-channels",
3462				   dma40_memcpy_channels,
3463				   num_memcpy);
3464
3465	list = of_get_property(np, "disabled-channels", &num_disabled);
3466	num_disabled /= sizeof(*list);
3467
3468	if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3469		d40_err(&pdev->dev,
3470			"Invalid number of disabled channels specified (%d)\n",
3471			num_disabled);
3472		return -EINVAL;
3473	}
3474
3475	of_property_read_u32_array(np, "disabled-channels",
3476				   pdata->disabled_channels,
3477				   num_disabled);
3478	pdata->disabled_channels[num_disabled] = -1;
3479
3480	pdev->dev.platform_data = pdata;
3481
3482	return 0;
3483}
3484
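/*
 * Illustrative device-tree fragment matched by d40_of_probe() above; the
 * channel numbers are hypothetical:
 *
 *	dma-controller {
 *		compatible = "stericsson,dma40";
 *		dma-channels = <8>;
 *		memcpy-channels = <56 57 58 59 60>;
 *		disabled-channels = <12>;
 *	};
 */
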
3485static int __init d40_probe(struct platform_device *pdev)
3486{
3487	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3488	struct device_node *np = pdev->dev.of_node;
3489	int ret = -ENOENT;
3490	struct d40_base *base;
3491	struct resource *res;
3492	int num_reserved_chans;
3493	u32 val;
3494
3495	if (!plat_data) {
3496		if (np) {
3497			if (d40_of_probe(pdev, np)) {
3498				ret = -ENOMEM;
3499				goto report_failure;
3500			}
3501		} else {
3502			d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3503			goto report_failure;
3504		}
3505	}
3506
3507	base = d40_hw_detect_init(pdev);
3508	if (!base)
3509		goto report_failure;
3510
3511	num_reserved_chans = d40_phy_res_init(base);
3512
3513	platform_set_drvdata(pdev, base);
3514
3515	spin_lock_init(&base->interrupt_lock);
3516	spin_lock_init(&base->execmd_lock);
3517
3518	/* Get IO for logical channel parameter address */
3519	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3520	if (!res) {
3521		ret = -ENOENT;
3522		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3523		goto destroy_cache;
3524	}
3525	base->lcpa_size = resource_size(res);
3526	base->phy_lcpa = res->start;
3527
3528	if (request_mem_region(res->start, resource_size(res),
3529			       D40_NAME " I/O lcpa") == NULL) {
3530		ret = -EBUSY;
3531		d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3532		goto destroy_cache;
3533	}
3534
3535	/* We make use of ESRAM memory for this. */
3536	val = readl(base->virtbase + D40_DREG_LCPA);
3537	if (res->start != val && val != 0) {
3538		dev_warn(&pdev->dev,
3539			 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3540			 __func__, val, &res->start);
3541	} else
3542		writel(res->start, base->virtbase + D40_DREG_LCPA);
3543
3544	base->lcpa_base = ioremap(res->start, resource_size(res));
3545	if (!base->lcpa_base) {
3546		ret = -ENOMEM;
3547		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3548		goto destroy_cache;
3549	}
3550	/* If lcla has to be located in ESRAM we don't need to allocate */
3551	if (base->plat_data->use_esram_lcla) {
3552		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3553							"lcla_esram");
3554		if (!res) {
3555			ret = -ENOENT;
3556			d40_err(&pdev->dev,
3557				"No \"lcla_esram\" memory resource\n");
3558			goto destroy_cache;
3559		}
3560		base->lcla_pool.base = ioremap(res->start,
3561						resource_size(res));
3562		if (!base->lcla_pool.base) {
3563			ret = -ENOMEM;
3564			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3565			goto destroy_cache;
3566		}
3567		writel(res->start, base->virtbase + D40_DREG_LCLA);
3568
3569	} else {
3570		ret = d40_lcla_allocate(base);
3571		if (ret) {
3572			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3573			goto destroy_cache;
3574		}
3575	}
3576
3577	spin_lock_init(&base->lcla_pool.lock);
3578
3579	base->irq = platform_get_irq(pdev, 0);
3580
3581	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3582	if (ret) {
3583		d40_err(&pdev->dev, "No IRQ defined\n");
3584		goto destroy_cache;
3585	}
3586
3587	if (base->plat_data->use_esram_lcla) {
3588
3589		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3590		if (IS_ERR(base->lcpa_regulator)) {
3591			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3592			ret = PTR_ERR(base->lcpa_regulator);
3593			base->lcpa_regulator = NULL;
3594			goto destroy_cache;
3595		}
3596
3597		ret = regulator_enable(base->lcpa_regulator);
3598		if (ret) {
3599			d40_err(&pdev->dev,
3600				"Failed to enable lcpa_regulator\n");
3601			regulator_put(base->lcpa_regulator);
3602			base->lcpa_regulator = NULL;
3603			goto destroy_cache;
3604		}
3605	}
3606
3607	writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3608
3609	pm_runtime_irq_safe(base->dev);
3610	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3611	pm_runtime_use_autosuspend(base->dev);
3612	pm_runtime_mark_last_busy(base->dev);
3613	pm_runtime_set_active(base->dev);
3614	pm_runtime_enable(base->dev);
3615
3616	ret = d40_dmaengine_init(base, num_reserved_chans);
3617	if (ret)
3618		goto destroy_cache;
3619
3620	base->dev->dma_parms = &base->dma_parms;
3621	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3622	if (ret) {
3623		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3624		goto destroy_cache;
3625	}
3626
3627	d40_hw_init(base);
3628
3629	if (np) {
3630		ret = of_dma_controller_register(np, d40_xlate, NULL);
3631		if (ret)
3632			dev_err(&pdev->dev,
3633				"could not register of_dma_controller\n");
3634	}
3635
3636	dev_info(base->dev, "initialized\n");
3637	return 0;
3638 destroy_cache:
3639	kmem_cache_destroy(base->desc_slab);
3640	if (base->virtbase)
3641		iounmap(base->virtbase);
3642
3643	if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3644		iounmap(base->lcla_pool.base);
3645		base->lcla_pool.base = NULL;
3646	}
3647
3648	if (base->lcla_pool.dma_addr)
3649		dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3650				 SZ_1K * base->num_phy_chans,
3651				 DMA_TO_DEVICE);
3652
3653	if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3654		free_pages((unsigned long)base->lcla_pool.base,
3655			   base->lcla_pool.pages);
3656
3657	kfree(base->lcla_pool.base_unaligned);
3658
3659	if (base->phy_lcpa)
3660		release_mem_region(base->phy_lcpa,
3661				   base->lcpa_size);
3662	if (base->phy_start)
3663		release_mem_region(base->phy_start,
3664				   base->phy_size);
3665	if (base->clk) {
3666		clk_disable_unprepare(base->clk);
3667		clk_put(base->clk);
3668	}
3669
3670	if (base->lcpa_regulator) {
3671		regulator_disable(base->lcpa_regulator);
3672		regulator_put(base->lcpa_regulator);
3673	}
3674
3675	kfree(base->lcla_pool.alloc_map);
3676	kfree(base->lookup_log_chans);
3677	kfree(base->lookup_phy_chans);
3678	kfree(base->phy_res);
3679	kfree(base);
3680 report_failure:
3681	d40_err(&pdev->dev, "probe failed\n");
3682	return ret;
3683}
3684
3685static const struct of_device_id d40_match[] = {
3686        { .compatible = "stericsson,dma40", },
3687        {}
3688};
3689
3690static struct platform_driver d40_driver = {
3691	.driver = {
3692		.name  = D40_NAME,
3693		.pm = &dma40_pm_ops,
3694		.of_match_table = d40_match,
3695	},
3696};
3697
3698static int __init stedma40_init(void)
3699{
3700	return platform_driver_probe(&d40_driver, d40_probe);
3701}
3702subsys_initcall(stedma40_init);