// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/nbpfaxi.h>

#include "dmaengine.h"

#define NBPF_REG_CHAN_OFFSET	0
#define NBPF_REG_CHAN_SIZE	0x40

/* Channel Current Transaction Byte register */
#define NBPF_CHAN_CUR_TR_BYTE	0x20

/* Channel Status register */
#define NBPF_CHAN_STAT	0x24
#define NBPF_CHAN_STAT_EN	1
#define NBPF_CHAN_STAT_TACT	4
#define NBPF_CHAN_STAT_ERR	0x10
#define NBPF_CHAN_STAT_END	0x20
#define NBPF_CHAN_STAT_TC	0x40
#define NBPF_CHAN_STAT_DER	0x400

/* Channel Control register */
#define NBPF_CHAN_CTRL	0x28
#define NBPF_CHAN_CTRL_SETEN	1
#define NBPF_CHAN_CTRL_CLREN	2
#define NBPF_CHAN_CTRL_STG	4
#define NBPF_CHAN_CTRL_SWRST	8
#define NBPF_CHAN_CTRL_CLRRQ	0x10
#define NBPF_CHAN_CTRL_CLREND	0x20
#define NBPF_CHAN_CTRL_CLRTC	0x40
#define NBPF_CHAN_CTRL_SETSUS	0x100
#define NBPF_CHAN_CTRL_CLRSUS	0x200

/* Channel Configuration register */
#define NBPF_CHAN_CFG	0x2c
#define NBPF_CHAN_CFG_SEL	7		/* terminal SELect: 0..7 */
#define NBPF_CHAN_CFG_REQD	8		/* REQuest Direction: DMAREQ is 0: input, 1: output */
#define NBPF_CHAN_CFG_LOEN	0x10		/* LOw ENable: low DMA request line is: 0: inactive, 1: active */
#define NBPF_CHAN_CFG_HIEN	0x20		/* HIgh ENable: high DMA request line is: 0: inactive, 1: active */
#define NBPF_CHAN_CFG_LVL	0x40		/* LeVeL: DMA request line is sensed as 0: edge, 1: level */
#define NBPF_CHAN_CFG_AM	0x700		/* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */
#define NBPF_CHAN_CFG_SDS	0xf000		/* Source Data Size: 0: 8 bits,... , 7: 1024 bits */
#define NBPF_CHAN_CFG_DDS	0xf0000		/* Destination Data Size: as above */
#define NBPF_CHAN_CFG_SAD	0x100000	/* Source ADdress counting: 0: increment, 1: fixed */
#define NBPF_CHAN_CFG_DAD	0x200000	/* Destination ADdress counting: 0: increment, 1: fixed */
#define NBPF_CHAN_CFG_TM	0x400000	/* Transfer Mode: 0: single, 1: block TM */
#define NBPF_CHAN_CFG_DEM	0x1000000	/* DMAEND interrupt Mask */
#define NBPF_CHAN_CFG_TCM	0x2000000	/* DMATCO interrupt Mask */
#define NBPF_CHAN_CFG_SBE	0x8000000	/* Sweep Buffer Enable */
#define NBPF_CHAN_CFG_RSEL	0x10000000	/* RM: Register Set sELect */
#define NBPF_CHAN_CFG_RSW	0x20000000	/* RM: Register Select sWitch */
#define NBPF_CHAN_CFG_REN	0x40000000	/* RM: Register Set Enable */
#define NBPF_CHAN_CFG_DMS	0x80000000	/* 0: register mode (RM), 1: link mode (LM) */

#define NBPF_CHAN_NXLA	0x38
#define NBPF_CHAN_CRLA	0x3c

/* Link Header field */
#define NBPF_HEADER_LV	1
#define NBPF_HEADER_LE	2
#define NBPF_HEADER_WBD	4
#define NBPF_HEADER_DIM	8

#define NBPF_CTRL	0x300
#define NBPF_CTRL_PR	1		/* 0: fixed priority, 1: round robin */
#define NBPF_CTRL_LVINT	2		/* DMAEND and DMAERR signalling: 0: pulse, 1: level */

#define NBPF_DSTAT_ER	0x314
#define NBPF_DSTAT_END	0x318

#define NBPF_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct nbpf_config {
	int num_channels;
	int buffer_size;
};

/*
 * Three types of objects are used to describe DMA transfers:
 * 1. a high-level descriptor, containing a struct dma_async_tx_descriptor
 *	object, used to communicate with the user
 * 2. hardware DMA link descriptors, that we pass to the DMAC for DMA transfer
 *	queuing; these must be DMAable, using either the streaming DMA API or
 *	allocated from coherent memory - one per SG segment
 * 3. one descriptor per SG segment, used to manage the HW link descriptors
 *	from (2). They do not have to be DMAable. They can either be (a)
 *	allocated together with the link descriptors as mixed (DMA / CPU)
 *	objects, or (b) separately. Even if allocated separately it would be
 *	best to link them to the link descriptors once during channel resource
 *	allocation and always use them as a single object.
 * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall
 * be treated as a single SG segment descriptor. A sketch of the resulting
 * object graph follows below.
 */
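
/*
 * Illustrative object graph (a sketch, not from the datasheet): one nbpf_desc
 * tracks a whole transfer, its ->sg list strings together per-segment
 * nbpf_link_desc entries, and each of those points at one DMAable
 * nbpf_link_reg that the hardware walks via its "next" pointer:
 *
 *	nbpf_desc --- sg ---> nbpf_link_desc --> nbpf_link_desc --> ...
 *	(async_tx)             |                  |
 *	                       v                  v
 *	                      nbpf_link_reg ---> nbpf_link_reg ---> 0
 *	                      (hwdesc, DMA)      (hwdesc, DMA)
 */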

struct nbpf_link_reg {
	u32	header;
	u32	src_addr;
	u32	dst_addr;
	u32	transaction_size;
	u32	config;
	u32	interval;
	u32	extension;
	u32	next;
} __packed;

struct nbpf_device;
struct nbpf_channel;
struct nbpf_desc;

struct nbpf_link_desc {
	struct nbpf_link_reg *hwdesc;
	dma_addr_t hwdesc_dma_addr;
	struct nbpf_desc *desc;
	struct list_head node;
};

/**
 * struct nbpf_desc - DMA transfer descriptor
 * @async_tx:	dmaengine object
 * @user_wait:	waiting for a user ack
 * @length:	total transfer length
 * @chan:	associated DMAC channel
 * @sg:		list of hardware descriptors, represented by struct nbpf_link_desc
 * @node:	member in channel descriptor lists
 */
struct nbpf_desc {
	struct dma_async_tx_descriptor async_tx;
	bool user_wait;
	size_t length;
	struct nbpf_channel *chan;
	struct list_head sg;
	struct list_head node;
};

/* Take a wild guess: allocate 4 segments per descriptor */
#define NBPF_SEGMENTS_PER_DESC 4
#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) /	\
	(sizeof(struct nbpf_desc) +					\
	 NBPF_SEGMENTS_PER_DESC *					\
	 (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg))))
#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE)
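
/*
 * Worked example of the packing above, assuming a 4 KiB page and typical
 * 64-bit struct sizes (the exact figures vary by architecture and config):
 * with sizeof(struct nbpf_desc) on the order of 150 bytes, plus 4 segments of
 * roughly (40 + 32) bytes each, every descriptor costs about 440 bytes, so
 * NBPF_DESCS_PER_PAGE evaluates to about 9 and NBPF_SEGMENTS_PER_PAGE to
 * about 36 - i.e. one page backs roughly nine transfers of up to four SG
 * segments each.
 */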

struct nbpf_desc_page {
	struct list_head node;
	struct nbpf_desc desc[NBPF_DESCS_PER_PAGE];
	struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE];
	struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE];
};

/**
 * struct nbpf_channel - one DMAC channel
 * @dma_chan:	standard dmaengine channel object
 * @tasklet:	channel specific tasklet used for callbacks
 * @base:	register address base
 * @nbpf:	DMAC
 * @name:	IRQ name
 * @irq:	IRQ number
 * @slave_src_addr:	source address for slave DMA
 * @slave_src_width:	source slave data size in bytes
 * @slave_src_burst:	maximum source slave burst size in bytes
 * @slave_dst_addr:	destination address for slave DMA
 * @slave_dst_width:	destination slave data size in bytes
 * @slave_dst_burst:	maximum destination slave burst size in bytes
 * @terminal:	DMA terminal, assigned to this channel
 * @dmarq_cfg:	DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG
 * @flags:	configuration flags from DT
 * @lock:	protect descriptor lists
 * @free_links:	list of free link descriptors
 * @free:	list of free descriptors
 * @queued:	list of queued descriptors
 * @active:	list of descriptors, scheduled for processing
 * @done:	list of completed descriptors, waiting post-processing
 * @desc_page:	list of additionally allocated descriptor pages - if any
 * @running:	linked descriptor of running transaction
 * @paused:	are transfers on this channel paused?
 */
struct nbpf_channel {
	struct dma_chan dma_chan;
	struct tasklet_struct tasklet;
	void __iomem *base;
	struct nbpf_device *nbpf;
	char name[16];
	int irq;
	dma_addr_t slave_src_addr;
	size_t slave_src_width;
	size_t slave_src_burst;
	dma_addr_t slave_dst_addr;
	size_t slave_dst_width;
	size_t slave_dst_burst;
	unsigned int terminal;
	u32 dmarq_cfg;
	unsigned long flags;
	spinlock_t lock;
	struct list_head free_links;
	struct list_head free;
	struct list_head queued;
	struct list_head active;
	struct list_head done;
	struct list_head desc_page;
	struct nbpf_desc *running;
	bool paused;
};

struct nbpf_device {
	struct dma_device dma_dev;
	void __iomem *base;
	u32 max_burst_mem_read;
	u32 max_burst_mem_write;
	struct clk *clk;
	const struct nbpf_config *config;
	unsigned int eirq;
	struct nbpf_channel chan[];
};

enum nbpf_model {
	NBPF1B4,
	NBPF1B8,
	NBPF1B16,
	NBPF4B4,
	NBPF4B8,
	NBPF4B16,
	NBPF8B4,
	NBPF8B8,
	NBPF8B16,
};

static struct nbpf_config nbpf_cfg[] = {
	[NBPF1B4] = {
		.num_channels = 1,
		.buffer_size = 4,
	},
	[NBPF1B8] = {
		.num_channels = 1,
		.buffer_size = 8,
	},
	[NBPF1B16] = {
		.num_channels = 1,
		.buffer_size = 16,
	},
	[NBPF4B4] = {
		.num_channels = 4,
		.buffer_size = 4,
	},
	[NBPF4B8] = {
		.num_channels = 4,
		.buffer_size = 8,
	},
	[NBPF4B16] = {
		.num_channels = 4,
		.buffer_size = 16,
	},
	[NBPF8B4] = {
		.num_channels = 8,
		.buffer_size = 4,
	},
	[NBPF8B8] = {
		.num_channels = 8,
		.buffer_size = 8,
	},
	[NBPF8B16] = {
		.num_channels = 8,
		.buffer_size = 16,
	},
};

#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan)

/*
 * dmaengine drivers seem to have a lot in common and instead of sharing more
 * code, they reimplement those common algorithms independently. In this driver
 * we try to separate the hardware-specific part from the (largely) generic
 * part. This improves code readability and makes it possible in the future to
 * reuse the generic code in the form of a helper library. That generic code
 * should be suitable for various DMA controllers, using transfer descriptors
 * in RAM and pushing one SG list at a time to the DMA controller.
 */

/*		Hardware-specific part		*/

static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
				 unsigned int offset)
{
	u32 data = ioread32(chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
	return data;
}

static inline void nbpf_chan_write(struct nbpf_channel *chan,
				   unsigned int offset, u32 data)
{
	iowrite32(data, chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
}

static inline u32 nbpf_read(struct nbpf_device *nbpf,
			    unsigned int offset)
{
	u32 data = ioread32(nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
	return data;
}

static inline void nbpf_write(struct nbpf_device *nbpf,
			      unsigned int offset, u32 data)
{
	iowrite32(data, nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
}

static void nbpf_chan_halt(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
}

static bool nbpf_status_get(struct nbpf_channel *chan)
{
	u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);

	return status & BIT(chan - chan->nbpf->chan);
}
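
/*
 * Note on the bit test above: the channel index is recovered by pointer
 * arithmetic, so e.g. for the third channel chan - chan->nbpf->chan == 2,
 * and bit 2 of the DSTAT_END register is tested.
 */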

static void nbpf_status_ack(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
}

static u32 nbpf_error_get(struct nbpf_device *nbpf)
{
	return nbpf_read(nbpf, NBPF_DSTAT_ER);
}

static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
{
	return nbpf->chan + __ffs(error);
}

static void nbpf_error_clear(struct nbpf_channel *chan)
{
	u32 status;
	int i;

	/* Stop the channel, make sure DMA has been aborted */
	nbpf_chan_halt(chan);

	for (i = 1000; i; i--) {
		status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
		if (!(status & NBPF_CHAN_STAT_TACT))
			break;
		cpu_relax();
	}

	if (!i)
		dev_err(chan->dma_chan.device->dev,
			"%s(): abort timeout, channel status 0x%x\n", __func__, status);

	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
}

static int nbpf_start(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);

	nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
	chan->paused = false;

	/* Software trigger MEMCPY - only MEMCPY uses the block mode */
	if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);

	dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
		nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA));

	return 0;
}

static void nbpf_chan_prepare(struct nbpf_channel *chan)
{
	chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LEVEL ?
		 NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
		chan->terminal;
}

static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
{
	/* Don't output DMAACK */
	chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
	chan->terminal = 0;
	chan->flags = 0;
}

static void nbpf_chan_configure(struct nbpf_channel *chan)
{
	/*
	 * We assume that only the link mode and the DMA request line
	 * configuration have to be set in the configuration register manually.
	 * Dynamic per-transfer configuration will be loaded from the transfer
	 * descriptors.
	 */
	nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}

static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
			enum dma_transfer_direction direction)
{
	int max_burst = nbpf->config->buffer_size * 8;

	if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
		switch (direction) {
		case DMA_MEM_TO_MEM:
			max_burst = min_not_zero(nbpf->max_burst_mem_read,
						 nbpf->max_burst_mem_write);
			break;
		case DMA_MEM_TO_DEV:
			if (nbpf->max_burst_mem_read)
				max_burst = nbpf->max_burst_mem_read;
			break;
		case DMA_DEV_TO_MEM:
			if (nbpf->max_burst_mem_write)
				max_burst = nbpf->max_burst_mem_write;
			break;
		case DMA_DEV_TO_DEV:
		default:
			break;
		}
	}

	/* Maximum supported bursts depend on the buffer size */
	return min_t(int, __ffs(size), ilog2(max_burst));
}
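
/*
 * Example of the encoding above (values illustrative): on a variant with
 * buffer_size 16 and no DT burst limits, max_burst = 128 bytes. For a 4096
 * byte transfer __ffs(4096) = 12 and ilog2(128) = 7, so 7 is returned -
 * which, written into the SDS / DDS fields, selects 128 byte (1024 bit)
 * transfer units. An odd size, say 6 bytes, has __ffs(6) = 1 and would be
 * limited to 2 byte units instead.
 */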

static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
			     enum dma_slave_buswidth width, u32 burst)
{
	size_t size;

	if (!burst)
		burst = 1;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		size = 8 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		size = 4 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		size = 2 * burst;
		break;

	default:
		pr_warn("%s(): invalid bus width %u\n", __func__, width);
		fallthrough;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		size = burst;
	}

	return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
}
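
/*
 * For instance (illustrative): dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES
 * with dst_maxburst = 4 gives size = 16, which nbpf_xfer_ds() encodes as 4
 * (16 byte units), subject to the buffer-size cap above. Note that, despite
 * the name, this returns the log2 encoding rather than a raw byte count, so
 * the slave_*_width / _burst fields of struct nbpf_channel store encoded
 * values.
 */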

/*
 * We need a way to recognise slaves, whose data is sent "raw" over the bus,
 * i.e. when it isn't known in advance how many bytes will be received. In that
 * case the slave driver has to provide a "large enough" buffer and either read
 * it out when it is full, or detect that some data has arrived, wait for a
 * timeout and, if no more data arrives, retrieve what is already there. We
 * want to handle such slaves in a special way, to allow an optimised mode for
 * other users, for whom the amount of data is known in advance. So far there
 * is no generic way to recognise such slaves; we use a data-width check to
 * distinguish between the SD host and the PL011 UART.
 */
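
/*
 * Concretely (an interpretation of the check in nbpf_prep_one() below, not a
 * documented rule): slave_src_width holds the log2-encoded access size, so
 * "width >= 3" means 8 byte accesses, i.e. the slave drives the full 64-bit
 * bus like the SD host and may use bursts, while a narrow slave such as the
 * PL011 with 1 byte accesses may not.
 */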

static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
			 enum dma_transfer_direction direction,
			 dma_addr_t src, dma_addr_t dst, size_t size, bool last)
{
	struct nbpf_link_reg *hwdesc = ldesc->hwdesc;
	struct nbpf_desc *desc = ldesc->desc;
	struct nbpf_channel *chan = desc->chan;
	struct device *dev = chan->dma_chan.device->dev;
	size_t mem_xfer, slave_xfer;
	bool can_burst;

	hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV |
		(last ? NBPF_HEADER_LE : 0);

	hwdesc->src_addr = src;
	hwdesc->dst_addr = dst;
	hwdesc->transaction_size = size;

	/*
	 * Set config: SAD, DAD, DDS, SDS, etc.
	 * Note on transfer sizes: the DMAC can perform unaligned DMA transfers,
	 * but it is important to make the transaction size a multiple of both
	 * the receiver and the transmitter transfer sizes. It is also possible
	 * to use different RAM and device transfer sizes, and this does work
	 * well with some devices, e.g. with V08R07S01E SD host controllers,
	 * which can use 128 byte transfers. But this doesn't work with other
	 * devices, especially when the transaction size is unknown. This is the
	 * case, e.g. with serial drivers like amba-pl011.c. For reception they
	 * set up a transaction size of 4K, and if fewer bytes are received, the
	 * driver pauses DMA and reads out the data received via DMA as well as
	 * what is left in the Rx FIFO. For this to work with the RAM side using
	 * burst transfers we enable the SBE bit and terminate the transfer in
	 * our .device_pause handler.
	 */
	mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);

	switch (direction) {
	case DMA_DEV_TO_MEM:
		can_burst = chan->slave_src_width >= 3;
		slave_xfer = min(mem_xfer, can_burst ?
				 chan->slave_src_burst : chan->slave_src_width);
		/*
		 * Is the slave narrower than 64 bits, i.e. isn't using the full
		 * bus width and cannot use bursts?
		 */
		if (mem_xfer > chan->slave_src_burst && !can_burst)
			mem_xfer = chan->slave_src_burst;
		/* Device-to-RAM DMA is unreliable without REQD set */
		hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) |
			(NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD |
			NBPF_CHAN_CFG_SBE;
		break;

	case DMA_MEM_TO_DEV:
		slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ?
				 chan->slave_dst_burst : chan->slave_dst_width);
		hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD;
		break;

	case DMA_MEM_TO_MEM:
		hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM |
			(NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (mem_xfer << 16));
		break;

	default:
		return -EINVAL;
	}

	hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) |
		NBPF_CHAN_CFG_DMS;

	dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
		__func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
		hwdesc->config, size, &src, &dst);

	dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
				   DMA_TO_DEVICE);

	return 0;
}
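
/*
 * Worked example of a config word this produces (numbers illustrative only):
 * for DMA_DEV_TO_MEM on the last segment, with mem_xfer = 4 (16 byte RAM
 * units) and slave_xfer = 2 (4 byte device units), and ignoring dmarq_cfg:
 * SAD (0x100000) | DDS (4 << 16) | SDS (2 << 12) | REQD (0x8) |
 * SBE (0x8000000) | DMS (0x80000000) == 0x88142008.
 */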

static size_t nbpf_bytes_left(struct nbpf_channel *chan)
{
	return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE);
}

static void nbpf_configure(struct nbpf_device *nbpf)
{
	nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
}

/*		Generic part			*/

/* DMA ENGINE functions */
static void nbpf_issue_pending(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	unsigned long flags;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);
	if (list_empty(&chan->queued))
		goto unlock;

	list_splice_tail_init(&chan->queued, &chan->active);

	if (!chan->running) {
		struct nbpf_desc *desc = list_first_entry(&chan->active,
						struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	enum dma_status status = dma_cookie_status(dchan, cookie, state);

	if (state) {
		dma_cookie_t running;
		unsigned long flags;

		spin_lock_irqsave(&chan->lock, flags);
		running = chan->running ? chan->running->async_tx.cookie : -EINVAL;

		if (cookie == running) {
			state->residue = nbpf_bytes_left(chan);
			dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__,
				state->residue);
		} else if (status == DMA_IN_PROGRESS) {
			struct nbpf_desc *desc;
			bool found = false;

			list_for_each_entry(desc, &chan->active, node)
				if (desc->async_tx.cookie == cookie) {
					found = true;
					break;
				}

			if (!found)
				list_for_each_entry(desc, &chan->queued, node)
					if (desc->async_tx.cookie == cookie) {
						found = true;
						break;
					}

			state->residue = found ? desc->length : 0;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

	if (chan->paused)
		status = DMA_PAUSED;

	return status;
}

static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
	struct nbpf_channel *chan = desc->chan;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &chan->queued);
	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);

	return cookie;
}

static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
{
	struct dma_chan *dchan = &chan->dma_chan;
	struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	struct nbpf_link_desc *ldesc;
	struct nbpf_link_reg *hwdesc;
	struct nbpf_desc *desc;
	LIST_HEAD(head);
	LIST_HEAD(lhead);
	int i;
	struct device *dev = dchan->device->dev;

	if (!dpage)
		return -ENOMEM;

	dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n",
		__func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));

	for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
	     i < ARRAY_SIZE(dpage->ldesc);
	     i++, ldesc++, hwdesc++) {
		ldesc->hwdesc = hwdesc;
		list_add_tail(&ldesc->node, &lhead);
		ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
					hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);

		dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
			hwdesc, &ldesc->hwdesc_dma_addr);
	}

	for (i = 0, desc = dpage->desc;
	     i < ARRAY_SIZE(dpage->desc);
	     i++, desc++) {
		dma_async_tx_descriptor_init(&desc->async_tx, dchan);
		desc->async_tx.tx_submit = nbpf_tx_submit;
		desc->chan = chan;
		INIT_LIST_HEAD(&desc->sg);
		list_add_tail(&desc->node, &head);
	}

	/*
	 * This function cannot be called from interrupt context, so there is
	 * no need to save flags.
	 */
	spin_lock_irq(&chan->lock);
	list_splice_tail(&lhead, &chan->free_links);
	list_splice_tail(&head, &chan->free);
	list_add(&dpage->node, &chan->desc_page);
	spin_unlock_irq(&chan->lock);

	return ARRAY_SIZE(dpage->desc);
}

static void nbpf_desc_put(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
		list_move(&ldesc->node, &chan->free_links);

	list_add(&desc->node, &chan->free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void nbpf_scan_acked(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(desc, tmp, &chan->done, node)
		if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
			list_move(&desc->node, &head);
			desc->user_wait = false;
		}
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

/*
 * We have to allocate descriptors with the channel lock dropped. This means
 * that while we re-acquire the lock, freshly allocated buffers can already be
 * taken by others, so we have to re-check after re-acquiring the lock and
 * possibly retry, if the buffers are gone again.
 */
static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
{
	struct nbpf_desc *desc = NULL;
	struct nbpf_link_desc *ldesc, *prev = NULL;

	nbpf_scan_acked(chan);

	spin_lock_irq(&chan->lock);

	do {
		int i = 0, ret;

		if (list_empty(&chan->free)) {
			/* No more free descriptors */
			spin_unlock_irq(&chan->lock);
			ret = nbpf_desc_page_alloc(chan);
			if (ret < 0)
				return NULL;
			spin_lock_irq(&chan->lock);
			continue;
		}
		desc = list_first_entry(&chan->free, struct nbpf_desc, node);
		list_del(&desc->node);

		do {
			if (list_empty(&chan->free_links)) {
				/* No more free link descriptors */
				spin_unlock_irq(&chan->lock);
				ret = nbpf_desc_page_alloc(chan);
				if (ret < 0) {
					nbpf_desc_put(desc);
					return NULL;
				}
				spin_lock_irq(&chan->lock);
				continue;
			}

			ldesc = list_first_entry(&chan->free_links,
						 struct nbpf_link_desc, node);
			ldesc->desc = desc;
			if (prev)
				prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr;

			prev = ldesc;
			list_move_tail(&ldesc->node, &desc->sg);

			i++;
		} while (i < len);
	} while (!desc);

	prev->hwdesc->next = 0;

	spin_unlock_irq(&chan->lock);

	return desc;
}

static void nbpf_chan_idle(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);

	list_splice_init(&chan->done, &head);
	list_splice_init(&chan->active, &head);
	list_splice_init(&chan->queued, &head);

	chan->running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
			__func__, desc, desc->async_tx.cookie);
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

static int nbpf_pause(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	chan->paused = true;
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
	/* See comment in nbpf_prep_one() */
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);

	return 0;
}

static int nbpf_terminate_all(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
	dev_dbg(dchan->device->dev, "Terminating\n");

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);

	return 0;
}

static int nbpf_config(struct dma_chan *dchan,
		       struct dma_slave_config *config)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	/*
	 * We could check config->slave_id to match chan->terminal here,
	 * but with DT they would be coming from the same source, so
	 * such a check would be superfluous.
	 */

	chan->slave_dst_addr = config->dst_addr;
	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width, 1);
	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width,
					       config->dst_maxburst);
	chan->slave_src_addr = config->src_addr;
	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width, 1);
	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width,
					       config->src_maxburst);

	return 0;
}

static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
		struct scatterlist *src_sg, struct scatterlist *dst_sg,
		size_t len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct nbpf_link_desc *ldesc;
	struct scatterlist *mem_sg;
	struct nbpf_desc *desc;
	bool inc_src, inc_dst;
	size_t data_len = 0;
	int i = 0;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		mem_sg = dst_sg;
		inc_src = false;
		inc_dst = true;
		break;

	case DMA_MEM_TO_DEV:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = false;
		break;

	default:
	case DMA_MEM_TO_MEM:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = true;
	}

	desc = nbpf_desc_get(chan, len);
	if (!desc)
		return NULL;

	desc->async_tx.flags = flags;
	desc->async_tx.cookie = -EBUSY;
	desc->user_wait = false;

	/*
	 * This is a private descriptor list, and we own the descriptor. No need
	 * to lock.
	 */
	list_for_each_entry(ldesc, &desc->sg, node) {
		int ret = nbpf_prep_one(ldesc, direction,
					sg_dma_address(src_sg),
					sg_dma_address(dst_sg),
					sg_dma_len(mem_sg),
					i == len - 1);
		if (ret < 0) {
			nbpf_desc_put(desc);
			return NULL;
		}
		data_len += sg_dma_len(mem_sg);
		if (inc_src)
			src_sg = sg_next(src_sg);
		if (inc_dst)
			dst_sg = sg_next(dst_sg);
		mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg;
		i++;
	}

	desc->length = data_len;

	/* The user has to return the descriptor to us ASAP via .tx_submit() */
	return &desc->async_tx;
}

static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = len;
	sg_dma_len(&src_sg) = len;

	dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n",
		__func__, len, &src, &dst);

	return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
			    DMA_MEM_TO_MEM, flags);
}

static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist slave_sg;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	sg_init_table(&slave_sg, 1);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		sg_dma_address(&slave_sg) = chan->slave_dst_addr;
		return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
				    direction, flags);

	case DMA_DEV_TO_MEM:
		sg_dma_address(&slave_sg) = chan->slave_src_addr;
		return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
				    direction, flags);

	default:
		return NULL;
	}
}

static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	int ret;

	INIT_LIST_HEAD(&chan->free);
	INIT_LIST_HEAD(&chan->free_links);
	INIT_LIST_HEAD(&chan->queued);
	INIT_LIST_HEAD(&chan->active);
	INIT_LIST_HEAD(&chan->done);

	ret = nbpf_desc_page_alloc(chan);
	if (ret < 0)
		return ret;

	dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__,
		chan->terminal);

	nbpf_chan_configure(chan);

	return ret;
}

static void nbpf_free_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct nbpf_desc_page *dpage, *tmp;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);
	/* Clean up in case a channel is re-used for MEMCPY after slave DMA */
	nbpf_chan_prepare_default(chan);

	list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
		struct nbpf_link_desc *ldesc;
		int i;

		list_del(&dpage->node);
		for (i = 0, ldesc = dpage->ldesc;
		     i < ARRAY_SIZE(dpage->ldesc);
		     i++, ldesc++)
			dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
					 sizeof(*ldesc->hwdesc), DMA_TO_DEVICE);
		free_page((unsigned long)dpage);
	}
}

static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct nbpf_device *nbpf = ofdma->of_dma_data;
	struct dma_chan *dchan;
	struct nbpf_channel *chan;

	if (dma_spec->args_count != 2)
		return NULL;

	dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
	if (!dchan)
		return NULL;

	dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__,
		dma_spec->np);

	chan = nbpf_to_chan(dchan);

	chan->terminal = dma_spec->args[0];
	chan->flags = dma_spec->args[1];

	nbpf_chan_prepare(chan);
	nbpf_chan_configure(chan);

	return dchan;
}
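
/*
 * Illustrative client binding (node names and the terminal number are made
 * up): the two specifier cells map to chan->terminal and chan->flags above,
 * with the flags taken from <dt-bindings/dma/nbpfaxi.h>:
 *
 *	dmas = <&dmac 5 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>;
 *	dma-names = "rx";
 */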

static void nbpf_chan_tasklet(struct tasklet_struct *t)
{
	struct nbpf_channel *chan = from_tasklet(chan, t, tasklet);
	struct nbpf_desc *desc, *tmp;
	struct dmaengine_desc_callback cb;

	while (!list_empty(&chan->done)) {
		bool found = false, must_put, recycling = false;

		spin_lock_irq(&chan->lock);

		list_for_each_entry_safe(desc, tmp, &chan->done, node) {
			if (!desc->user_wait) {
				/* Newly completed descriptor, have to process */
				found = true;
				break;
			} else if (async_tx_test_ack(&desc->async_tx)) {
				/*
				 * This descriptor was waiting for a user ACK,
				 * it can be recycled now.
				 */
				list_del(&desc->node);
				spin_unlock_irq(&chan->lock);
				nbpf_desc_put(desc);
				recycling = true;
				break;
			}
		}

		if (recycling)
			continue;

		if (!found) {
			/* This can happen if TERMINATE_ALL has been called */
			spin_unlock_irq(&chan->lock);
			break;
		}

		dma_cookie_complete(&desc->async_tx);

		/*
		 * Once the lock has been released we must not dereference
		 * desc any more; it may still be on the "done" list.
		 */
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			must_put = true;
		} else {
			desc->user_wait = true;
			must_put = false;
		}

		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		/* ack and callback completed descriptor */
		spin_unlock_irq(&chan->lock);

		dmaengine_desc_callback_invoke(&cb, NULL);

		if (must_put)
			nbpf_desc_put(desc);
	}
}

static irqreturn_t nbpf_chan_irq(int irq, void *dev)
{
	struct nbpf_channel *chan = dev;
	bool done = nbpf_status_get(chan);
	struct nbpf_desc *desc;
	irqreturn_t ret;
	bool bh = false;

	if (!done)
		return IRQ_NONE;

	nbpf_status_ack(chan);

	dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);

	spin_lock(&chan->lock);
	desc = chan->running;
	if (WARN_ON(!desc)) {
		ret = IRQ_NONE;
		goto unlock;
	} else {
		ret = IRQ_HANDLED;
		bh = true;
	}

	list_move_tail(&desc->node, &chan->done);
	chan->running = NULL;

	if (!list_empty(&chan->active)) {
		desc = list_first_entry(&chan->active,
					struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock(&chan->lock);

	if (bh)
		tasklet_schedule(&chan->tasklet);

	return ret;
}

static irqreturn_t nbpf_err_irq(int irq, void *dev)
{
	struct nbpf_device *nbpf = dev;
	u32 error = nbpf_error_get(nbpf);

	dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);

	if (!error)
		return IRQ_NONE;

	do {
		struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);

		/* On error: abort all queued transfers, no callback */
		nbpf_error_clear(chan);
		nbpf_chan_idle(chan);
		error = nbpf_error_get(nbpf);
	} while (error);

	return IRQ_HANDLED;
}

static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
{
	struct dma_device *dma_dev = &nbpf->dma_dev;
	struct nbpf_channel *chan = nbpf->chan + n;
	int ret;

	chan->nbpf = nbpf;
	chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
	INIT_LIST_HEAD(&chan->desc_page);
	spin_lock_init(&chan->lock);
	chan->dma_chan.device = dma_dev;
	dma_cookie_init(&chan->dma_chan);
	nbpf_chan_prepare_default(chan);

	dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);

	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);

	tasklet_setup(&chan->tasklet, nbpf_chan_tasklet);
	ret = devm_request_irq(dma_dev->dev, chan->irq,
			nbpf_chan_irq, IRQF_SHARED,
			chan->name, chan);
	if (ret < 0)
		return ret;

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->dma_chan.device_node,
		      &dma_dev->channels);

	return 0;
}

static const struct of_device_id nbpf_match[] = {
	{.compatible = "renesas,nbpfaxi64dmac1b4",	.data = &nbpf_cfg[NBPF1B4]},
	{.compatible = "renesas,nbpfaxi64dmac1b8",	.data = &nbpf_cfg[NBPF1B8]},
	{.compatible = "renesas,nbpfaxi64dmac1b16",	.data = &nbpf_cfg[NBPF1B16]},
	{.compatible = "renesas,nbpfaxi64dmac4b4",	.data = &nbpf_cfg[NBPF4B4]},
	{.compatible = "renesas,nbpfaxi64dmac4b8",	.data = &nbpf_cfg[NBPF4B8]},
	{.compatible = "renesas,nbpfaxi64dmac4b16",	.data = &nbpf_cfg[NBPF4B16]},
	{.compatible = "renesas,nbpfaxi64dmac8b4",	.data = &nbpf_cfg[NBPF8B4]},
	{.compatible = "renesas,nbpfaxi64dmac8b8",	.data = &nbpf_cfg[NBPF8B8]},
	{.compatible = "renesas,nbpfaxi64dmac8b16",	.data = &nbpf_cfg[NBPF8B16]},
	{}
};
MODULE_DEVICE_TABLE(of, nbpf_match);

static int nbpf_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct nbpf_device *nbpf;
	struct dma_device *dma_dev;
	const struct nbpf_config *cfg;
	int num_channels;
	int ret, irq, eirq, i;
	int irqbuf[9] /* maximum 8 channels + error IRQ */;
	unsigned int irqs = 0;

	BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);

	/* DT only */
	if (!np)
		return -ENODEV;

	cfg = of_device_get_match_data(dev);
	num_channels = cfg->num_channels;

	nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels),
			    GFP_KERNEL);
	if (!nbpf)
		return -ENOMEM;

	dma_dev = &nbpf->dma_dev;
	dma_dev->dev = dev;

	nbpf->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(nbpf->base))
		return PTR_ERR(nbpf->base);

	nbpf->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(nbpf->clk))
		return PTR_ERR(nbpf->clk);

	of_property_read_u32(np, "max-burst-mem-read",
			     &nbpf->max_burst_mem_read);
	of_property_read_u32(np, "max-burst-mem-write",
			     &nbpf->max_burst_mem_write);

	nbpf->config = cfg;

	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
		irq = platform_get_irq_optional(pdev, i);
		if (irq < 0 && irq != -ENXIO)
			return irq;
		if (irq > 0)
			irqbuf[irqs++] = irq;
	}

	/*
	 * 3 IRQ resource schemes are supported:
	 * 1. 1 shared IRQ for error and all channels
	 * 2. 2 IRQs: one for error and one shared for all channels
	 * 3. 1 IRQ for error and an own IRQ for each channel
	 */
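	/*
	 * Hypothetical DT fragment for scheme 2 (the interrupt numbers and
	 * the "channel" name are illustrative; only the "error" name is
	 * looked up by this driver, via platform_get_irq_byname() below):
	 *
	 *	interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>,
	 *		     <0 13 IRQ_TYPE_LEVEL_HIGH>;
	 *	interrupt-names = "error", "channel";
	 */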
	if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
		return -ENXIO;

	if (irqs == 1) {
		eirq = irqbuf[0];

		for (i = 0; i <= num_channels; i++)
			nbpf->chan[i].irq = irqbuf[0];
	} else {
		eirq = platform_get_irq_byname(pdev, "error");
		if (eirq < 0)
			return eirq;

		if (irqs == num_channels + 1) {
			struct nbpf_channel *chan;

			for (i = 0, chan = nbpf->chan; i <= num_channels;
			     i++, chan++) {
				/* Skip the error IRQ */
				if (irqbuf[i] == eirq)
					i++;
				chan->irq = irqbuf[i];
			}

			if (chan != nbpf->chan + num_channels)
				return -EINVAL;
		} else {
			/* 2 IRQs and more than one channel */
			if (irqbuf[0] == eirq)
				irq = irqbuf[1];
			else
				irq = irqbuf[0];

			for (i = 0; i <= num_channels; i++)
				nbpf->chan[i].irq = irq;
		}
	}

	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
			       IRQF_SHARED, "dma error", nbpf);
	if (ret < 0)
		return ret;
	nbpf->eirq = eirq;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Create DMA Channel */
	for (i = 0; i < num_channels; i++) {
		ret = nbpf_chan_probe(nbpf, i);
		if (ret < 0)
			return ret;
	}

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= nbpf_alloc_chan_resources;
	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
	dma_dev->device_tx_status = nbpf_tx_status;
	dma_dev->device_issue_pending = nbpf_issue_pending;

	/*
	 * If we dropped support for unaligned MEMCPY buffer addresses and / or
	 * lengths by setting
	 * dma_dev->copy_align = 4;
	 * then we could set the transfer length to 4 bytes in nbpf_prep_one()
	 * for DMA_MEM_TO_MEM.
	 */

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
	dma_dev->device_config = nbpf_config;
	dma_dev->device_pause = nbpf_pause;
	dma_dev->device_terminate_all = nbpf_terminate_all;

	dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	platform_set_drvdata(pdev, nbpf);

	ret = clk_prepare_enable(nbpf->clk);
	if (ret < 0)
		return ret;

	nbpf_configure(nbpf);

	ret = dma_async_device_register(dma_dev);
	if (ret < 0)
		goto e_clk_off;

	ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
	if (ret < 0)
		goto e_dma_dev_unreg;

	return 0;

e_dma_dev_unreg:
	dma_async_device_unregister(dma_dev);
e_clk_off:
	clk_disable_unprepare(nbpf->clk);

	return ret;
}

static void nbpf_remove(struct platform_device *pdev)
{
	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
	int i;

	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);

	for (i = 0; i < nbpf->config->num_channels; i++) {
		struct nbpf_channel *chan = nbpf->chan + i;

		devm_free_irq(&pdev->dev, chan->irq, chan);

		tasklet_kill(&chan->tasklet);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&nbpf->dma_dev);
	clk_disable_unprepare(nbpf->clk);
}

static const struct platform_device_id nbpf_ids[] = {
	{"nbpfaxi64dmac1b4",	(kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
	{"nbpfaxi64dmac1b8",	(kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
	{"nbpfaxi64dmac1b16",	(kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
	{"nbpfaxi64dmac4b4",	(kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
	{"nbpfaxi64dmac4b8",	(kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
	{"nbpfaxi64dmac4b16",	(kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
	{"nbpfaxi64dmac8b4",	(kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
	{"nbpfaxi64dmac8b8",	(kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
	{"nbpfaxi64dmac8b16",	(kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
	{},
};
MODULE_DEVICE_TABLE(platform, nbpf_ids);

#ifdef CONFIG_PM
static int nbpf_runtime_suspend(struct device *dev)
{
	struct nbpf_device *nbpf = dev_get_drvdata(dev);

	clk_disable_unprepare(nbpf->clk);
	return 0;
}

static int nbpf_runtime_resume(struct device *dev)
{
	struct nbpf_device *nbpf = dev_get_drvdata(dev);

	return clk_prepare_enable(nbpf->clk);
}
#endif

static const struct dev_pm_ops nbpf_pm_ops = {
	SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
};

static struct platform_driver nbpf_driver = {
	.driver = {
		.name = "dma-nbpf",
		.of_match_table = nbpf_match,
		.pm = &nbpf_pm_ops,
	},
	.id_table = nbpf_ids,
	.probe = nbpf_probe,
	.remove_new = nbpf_remove,
};

module_platform_driver(nbpf_driver);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
MODULE_LICENSE("GPL v2");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
   4 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
   5 */
   6
   7#include <linux/bitmap.h>
   8#include <linux/bitops.h>
   9#include <linux/clk.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/dmaengine.h>
  12#include <linux/err.h>
  13#include <linux/interrupt.h>
  14#include <linux/io.h>
  15#include <linux/log2.h>
  16#include <linux/module.h>
  17#include <linux/of.h>
  18#include <linux/of_device.h>
  19#include <linux/of_dma.h>
  20#include <linux/platform_device.h>
  21#include <linux/slab.h>
  22
  23#include <dt-bindings/dma/nbpfaxi.h>
  24
  25#include "dmaengine.h"
  26
  27#define NBPF_REG_CHAN_OFFSET	0
  28#define NBPF_REG_CHAN_SIZE	0x40
  29
  30/* Channel Current Transaction Byte register */
  31#define NBPF_CHAN_CUR_TR_BYTE	0x20
  32
  33/* Channel Status register */
  34#define NBPF_CHAN_STAT	0x24
  35#define NBPF_CHAN_STAT_EN	1
  36#define NBPF_CHAN_STAT_TACT	4
  37#define NBPF_CHAN_STAT_ERR	0x10
  38#define NBPF_CHAN_STAT_END	0x20
  39#define NBPF_CHAN_STAT_TC	0x40
  40#define NBPF_CHAN_STAT_DER	0x400
  41
  42/* Channel Control register */
  43#define NBPF_CHAN_CTRL	0x28
  44#define NBPF_CHAN_CTRL_SETEN	1
  45#define NBPF_CHAN_CTRL_CLREN	2
  46#define NBPF_CHAN_CTRL_STG	4
  47#define NBPF_CHAN_CTRL_SWRST	8
  48#define NBPF_CHAN_CTRL_CLRRQ	0x10
  49#define NBPF_CHAN_CTRL_CLREND	0x20
  50#define NBPF_CHAN_CTRL_CLRTC	0x40
  51#define NBPF_CHAN_CTRL_SETSUS	0x100
  52#define NBPF_CHAN_CTRL_CLRSUS	0x200
  53
  54/* Channel Configuration register */
  55#define NBPF_CHAN_CFG	0x2c
  56#define NBPF_CHAN_CFG_SEL	7		/* terminal SELect: 0..7 */
  57#define NBPF_CHAN_CFG_REQD	8		/* REQuest Direction: DMAREQ is 0: input, 1: output */
  58#define NBPF_CHAN_CFG_LOEN	0x10		/* LOw ENable: low DMA request line is: 0: inactive, 1: active */
  59#define NBPF_CHAN_CFG_HIEN	0x20		/* HIgh ENable: high DMA request line is: 0: inactive, 1: active */
  60#define NBPF_CHAN_CFG_LVL	0x40		/* LeVeL: DMA request line is sensed as 0: edge, 1: level */
  61#define NBPF_CHAN_CFG_AM	0x700		/* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */
  62#define NBPF_CHAN_CFG_SDS	0xf000		/* Source Data Size: 0: 8 bits,... , 7: 1024 bits */
  63#define NBPF_CHAN_CFG_DDS	0xf0000		/* Destination Data Size: as above */
  64#define NBPF_CHAN_CFG_SAD	0x100000	/* Source ADdress counting: 0: increment, 1: fixed */
  65#define NBPF_CHAN_CFG_DAD	0x200000	/* Destination ADdress counting: 0: increment, 1: fixed */
  66#define NBPF_CHAN_CFG_TM	0x400000	/* Transfer Mode: 0: single, 1: block TM */
  67#define NBPF_CHAN_CFG_DEM	0x1000000	/* DMAEND interrupt Mask */
  68#define NBPF_CHAN_CFG_TCM	0x2000000	/* DMATCO interrupt Mask */
  69#define NBPF_CHAN_CFG_SBE	0x8000000	/* Sweep Buffer Enable */
  70#define NBPF_CHAN_CFG_RSEL	0x10000000	/* RM: Register Set sELect */
  71#define NBPF_CHAN_CFG_RSW	0x20000000	/* RM: Register Select sWitch */
  72#define NBPF_CHAN_CFG_REN	0x40000000	/* RM: Register Set Enable */
  73#define NBPF_CHAN_CFG_DMS	0x80000000	/* 0: register mode (RM), 1: link mode (LM) */
  74
  75#define NBPF_CHAN_NXLA	0x38
  76#define NBPF_CHAN_CRLA	0x3c
  77
  78/* Link Header field */
  79#define NBPF_HEADER_LV	1
  80#define NBPF_HEADER_LE	2
  81#define NBPF_HEADER_WBD	4
  82#define NBPF_HEADER_DIM	8
  83
  84#define NBPF_CTRL	0x300
  85#define NBPF_CTRL_PR	1		/* 0: fixed priority, 1: round robin */
  86#define NBPF_CTRL_LVINT	2		/* DMAEND and DMAERR signalling: 0: pulse, 1: level */
  87
  88#define NBPF_DSTAT_ER	0x314
  89#define NBPF_DSTAT_END	0x318
  90
  91#define NBPF_DMA_BUSWIDTHS \
  92	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
  93	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
  94	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
  95	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
  96	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
  97
  98struct nbpf_config {
  99	int num_channels;
 100	int buffer_size;
 101};
 102
 103/*
 104 * We've got 3 types of objects, used to describe DMA transfers:
 105 * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object
 106 *	in it, used to communicate with the user
 107 * 2. hardware DMA link descriptors, that we pass to DMAC for DMA transfer
 108 *	queuing, these must be DMAable, using either the streaming DMA API or
 109 *	allocated from coherent memory - one per SG segment
 110 * 3. one per SG segment descriptors, used to manage HW link descriptors from
 111 *	(2). They do not have to be DMAable. They can either be (a) allocated
 112 *	together with link descriptors as mixed (DMA / CPU) objects, or (b)
 113 *	separately. Even if allocated separately it would be best to link them
 114 *	to link descriptors once during channel resource allocation and always
 115 *	use them as a single object.
 116 * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be
 117 * treated as a single SG segment descriptor.
 118 */
 119
 120struct nbpf_link_reg {
 121	u32	header;
 122	u32	src_addr;
 123	u32	dst_addr;
 124	u32	transaction_size;
 125	u32	config;
 126	u32	interval;
 127	u32	extension;
 128	u32	next;
 129} __packed;
 130
 131struct nbpf_device;
 132struct nbpf_channel;
 133struct nbpf_desc;
 134
 135struct nbpf_link_desc {
 136	struct nbpf_link_reg *hwdesc;
 137	dma_addr_t hwdesc_dma_addr;
 138	struct nbpf_desc *desc;
 139	struct list_head node;
 140};
 141
 142/**
 143 * struct nbpf_desc - DMA transfer descriptor
 144 * @async_tx:	dmaengine object
 145 * @user_wait:	waiting for a user ack
 146 * @length:	total transfer length
 147 * @chan:	associated DMAC channel
 148 * @sg:		list of hardware descriptors, represented by struct nbpf_link_desc
 149 * @node:	member in channel descriptor lists
 150 */
 151struct nbpf_desc {
 152	struct dma_async_tx_descriptor async_tx;
 153	bool user_wait;
 154	size_t length;
 155	struct nbpf_channel *chan;
 156	struct list_head sg;
 157	struct list_head node;
 158};
 159
 160/* Take a wild guess: allocate 4 segments per descriptor */
 161#define NBPF_SEGMENTS_PER_DESC 4
 162#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) /	\
 163	(sizeof(struct nbpf_desc) +					\
 164	 NBPF_SEGMENTS_PER_DESC *					\
 165	 (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg))))
 166#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE)
 167
 168struct nbpf_desc_page {
 169	struct list_head node;
 170	struct nbpf_desc desc[NBPF_DESCS_PER_PAGE];
 171	struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE];
 172	struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE];
 173};
 174
 175/**
 176 * struct nbpf_channel - one DMAC channel
 177 * @dma_chan:	standard dmaengine channel object
 178 * @tasklet:	channel specific tasklet used for callbacks
 179 * @base:	register address base
 180 * @nbpf:	DMAC
 181 * @name:	IRQ name
 182 * @irq:	IRQ number
 183 * @slave_src_addr:	source address for slave DMA
 184 * @slave_src_width:	source slave data size in bytes
 185 * @slave_src_burst:	maximum source slave burst size in bytes
 186 * @slave_dst_addr:	destination address for slave DMA
 187 * @slave_dst_width:	destination slave data size in bytes
 188 * @slave_dst_burst:	maximum destination slave burst size in bytes
 189 * @terminal:	DMA terminal, assigned to this channel
 190 * @dmarq_cfg:	DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG
 191 * @flags:	configuration flags from DT
 192 * @lock:	protect descriptor lists
 193 * @free_links:	list of free link descriptors
 194 * @free:	list of free descriptors
 195 * @queued:	list of queued descriptors
 196 * @active:	list of descriptors, scheduled for processing
 197 * @done:	list of completed descriptors, waiting post-processing
 198 * @desc_page:	list of additionally allocated descriptor pages - if any
 199 * @running:	linked descriptor of running transaction
 200 * @paused:	are translations on this channel paused?
 201 */
 202struct nbpf_channel {
 203	struct dma_chan dma_chan;
 204	struct tasklet_struct tasklet;
 205	void __iomem *base;
 206	struct nbpf_device *nbpf;
 207	char name[16];
 208	int irq;
 209	dma_addr_t slave_src_addr;
 210	size_t slave_src_width;
 211	size_t slave_src_burst;
 212	dma_addr_t slave_dst_addr;
 213	size_t slave_dst_width;
 214	size_t slave_dst_burst;
 215	unsigned int terminal;
 216	u32 dmarq_cfg;
 217	unsigned long flags;
 218	spinlock_t lock;
 219	struct list_head free_links;
 220	struct list_head free;
 221	struct list_head queued;
 222	struct list_head active;
 223	struct list_head done;
 224	struct list_head desc_page;
 225	struct nbpf_desc *running;
 226	bool paused;
 227};
 228
 229struct nbpf_device {
 230	struct dma_device dma_dev;
 231	void __iomem *base;
 232	u32 max_burst_mem_read;
 233	u32 max_burst_mem_write;
 234	struct clk *clk;
 235	const struct nbpf_config *config;
 236	unsigned int eirq;
 237	struct nbpf_channel chan[];
 238};
 239
 240enum nbpf_model {
 241	NBPF1B4,
 242	NBPF1B8,
 243	NBPF1B16,
 244	NBPF4B4,
 245	NBPF4B8,
 246	NBPF4B16,
 247	NBPF8B4,
 248	NBPF8B8,
 249	NBPF8B16,
 250};
 251
 252static struct nbpf_config nbpf_cfg[] = {
 253	[NBPF1B4] = {
 254		.num_channels = 1,
 255		.buffer_size = 4,
 256	},
 257	[NBPF1B8] = {
 258		.num_channels = 1,
 259		.buffer_size = 8,
 260	},
 261	[NBPF1B16] = {
 262		.num_channels = 1,
 263		.buffer_size = 16,
 264	},
 265	[NBPF4B4] = {
 266		.num_channels = 4,
 267		.buffer_size = 4,
 268	},
 269	[NBPF4B8] = {
 270		.num_channels = 4,
 271		.buffer_size = 8,
 272	},
 273	[NBPF4B16] = {
 274		.num_channels = 4,
 275		.buffer_size = 16,
 276	},
 277	[NBPF8B4] = {
 278		.num_channels = 8,
 279		.buffer_size = 4,
 280	},
 281	[NBPF8B8] = {
 282		.num_channels = 8,
 283		.buffer_size = 8,
 284	},
 285	[NBPF8B16] = {
 286		.num_channels = 8,
 287		.buffer_size = 16,
 288	},
 289};
 290
 291#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan)
 292
 293/*
 294 * dmaengine drivers seem to have a lot in common and instead of sharing more
 295 * code, they reimplement those common algorithms independently. In this driver
 296 * we try to separate the hardware-specific part from the (largely) generic
 297 * part. This improves code readability and makes it possible in the future to
  298 * reuse the generic code in the form of a helper library. That generic code should
 299 * be suitable for various DMA controllers, using transfer descriptors in RAM
 300 * and pushing one SG list at a time to the DMA controller.
 301 */
 302
 303/*		Hardware-specific part		*/
 304
 305static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
 306				 unsigned int offset)
 307{
 308	u32 data = ioread32(chan->base + offset);
 309	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
 310		__func__, chan->base, offset, data);
 311	return data;
 312}
 313
 314static inline void nbpf_chan_write(struct nbpf_channel *chan,
 315				   unsigned int offset, u32 data)
 316{
 317	iowrite32(data, chan->base + offset);
 318	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
 319		__func__, chan->base, offset, data);
 320}
 321
 322static inline u32 nbpf_read(struct nbpf_device *nbpf,
 323			    unsigned int offset)
 324{
 325	u32 data = ioread32(nbpf->base + offset);
 326	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
 327		__func__, nbpf->base, offset, data);
 328	return data;
 329}
 330
 331static inline void nbpf_write(struct nbpf_device *nbpf,
 332			      unsigned int offset, u32 data)
 333{
 334	iowrite32(data, nbpf->base + offset);
 335	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
 336		__func__, nbpf->base, offset, data);
 337}
 338
 339static void nbpf_chan_halt(struct nbpf_channel *chan)
 340{
 341	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
 342}
 343
 344static bool nbpf_status_get(struct nbpf_channel *chan)
 345{
 346	u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);
 347
 348	return status & BIT(chan - chan->nbpf->chan);
 349}
 350
 351static void nbpf_status_ack(struct nbpf_channel *chan)
 352{
 353	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
 354}
 355
 356static u32 nbpf_error_get(struct nbpf_device *nbpf)
 357{
 358	return nbpf_read(nbpf, NBPF_DSTAT_ER);
 359}
 360
 361static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
 362{
 363	return nbpf->chan + __ffs(error);
 364}
 365
 366static void nbpf_error_clear(struct nbpf_channel *chan)
 367{
 368	u32 status;
 369	int i;
 370
 371	/* Stop the channel, make sure DMA has been aborted */
 372	nbpf_chan_halt(chan);
 373
 374	for (i = 1000; i; i--) {
 375		status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
 376		if (!(status & NBPF_CHAN_STAT_TACT))
 377			break;
 378		cpu_relax();
 379	}
 380
 381	if (!i)
 382		dev_err(chan->dma_chan.device->dev,
 383			"%s(): abort timeout, channel status 0x%x\n", __func__, status);
 384
 385	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
 386}
 387
 388static int nbpf_start(struct nbpf_desc *desc)
 389{
 390	struct nbpf_channel *chan = desc->chan;
 391	struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);
 392
 393	nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
 394	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
 395	chan->paused = false;
 396
 397	/* Software trigger MEMCPY - only MEMCPY uses the block mode */
 398	if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
 399		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);
 400
 401	dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
 402		nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA));
 403
 404	return 0;
 405}
 406
 407static void nbpf_chan_prepare(struct nbpf_channel *chan)
 408{
 409	chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
 410		(chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
 411		(chan->flags & NBPF_SLAVE_RQ_LEVEL ?
 412		 NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
 413		chan->terminal;
 414}
 415
 416static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
 417{
 418	/* Don't output DMAACK */
 419	chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
 420	chan->terminal = 0;
 421	chan->flags = 0;
 422}
 423
 424static void nbpf_chan_configure(struct nbpf_channel *chan)
 425{
 426	/*
  427	 * We assume that only the link mode and DMA request line configuration
 428	 * have to be set in the configuration register manually. Dynamic
 429	 * per-transfer configuration will be loaded from transfer descriptors.
 430	 */
 431	nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
 432}
 433
 434static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
 435			enum dma_transfer_direction direction)
 436{
 437	int max_burst = nbpf->config->buffer_size * 8;
 438
 439	if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
 440		switch (direction) {
 441		case DMA_MEM_TO_MEM:
 442			max_burst = min_not_zero(nbpf->max_burst_mem_read,
 443						 nbpf->max_burst_mem_write);
 444			break;
 445		case DMA_MEM_TO_DEV:
 446			if (nbpf->max_burst_mem_read)
 447				max_burst = nbpf->max_burst_mem_read;
 448			break;
 449		case DMA_DEV_TO_MEM:
 450			if (nbpf->max_burst_mem_write)
 451				max_burst = nbpf->max_burst_mem_write;
 452			break;
 453		case DMA_DEV_TO_DEV:
 454		default:
 455			break;
 456		}
 457	}
 458
 459	/* Maximum supported bursts depend on the buffer size */
 460	return min_t(int, __ffs(size), ilog2(max_burst));
 461}
 462
 463static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
 464			     enum dma_slave_buswidth width, u32 burst)
 465{
 466	size_t size;
 467
 468	if (!burst)
 469		burst = 1;
 470
 471	switch (width) {
 472	case DMA_SLAVE_BUSWIDTH_8_BYTES:
 473		size = 8 * burst;
 474		break;
 475
 476	case DMA_SLAVE_BUSWIDTH_4_BYTES:
 477		size = 4 * burst;
 478		break;
 479
 480	case DMA_SLAVE_BUSWIDTH_2_BYTES:
 481		size = 2 * burst;
 482		break;
 483
 484	default:
 485		pr_warn("%s(): invalid bus width %u\n", __func__, width);
 486		fallthrough;
 487	case DMA_SLAVE_BUSWIDTH_1_BYTE:
 488		size = burst;
 489	}
 490
 491	return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
 492}
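
/*
 * Worked example (illustrative): with a 16-byte buffer the maximum burst is
 * 16 * 8 = 128 bytes. A slave with a 4-byte bus width and a maxburst of 16
 * transfers 64 bytes per burst, so nbpf_xfer_size() returns
 * min(__ffs(64), ilog2(128)) = min(6, 7) = 6 - the hardware encoding for
 * 64-byte (512-bit) units, as used in the SDS / DDS fields.
 */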
 493
 494/*
  495 * We need a way to recognise slaves whose data is sent "raw" over the bus,
  496 * i.e. where it isn't known in advance how many bytes will be received.
  497 * Such a slave driver has to provide a "large enough" buffer and either
  498 * read the buffer when it is full, or detect that some data has arrived and
  499 * then, if no more data arrives within a timeout, receive what's already
  500 * there. We want to handle such slaves in a special way, to allow an
  501 * optimised mode for other users, for whom the amount of data is known in
  502 * advance. So far there is no generic way to recognise such slaves; we use
  503 * a data-width check to distinguish between the SD host and the PL011 UART.
 504 */
 505
 506static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
 507			 enum dma_transfer_direction direction,
 508			 dma_addr_t src, dma_addr_t dst, size_t size, bool last)
 509{
 510	struct nbpf_link_reg *hwdesc = ldesc->hwdesc;
 511	struct nbpf_desc *desc = ldesc->desc;
 512	struct nbpf_channel *chan = desc->chan;
 513	struct device *dev = chan->dma_chan.device->dev;
 514	size_t mem_xfer, slave_xfer;
 515	bool can_burst;
 516
 517	hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV |
 518		(last ? NBPF_HEADER_LE : 0);
 519
 520	hwdesc->src_addr = src;
 521	hwdesc->dst_addr = dst;
 522	hwdesc->transaction_size = size;
 523
 524	/*
 525	 * set config: SAD, DAD, DDS, SDS, etc.
 526	 * Note on transfer sizes: the DMAC can perform unaligned DMA transfers,
  527	 * but it is important that the transaction size be a multiple of both
 528	 * receiver and transmitter transfer sizes. It is also possible to use
 529	 * different RAM and device transfer sizes, and it does work well with
 530	 * some devices, e.g. with V08R07S01E SD host controllers, which can use
 531	 * 128 byte transfers. But this doesn't work with other devices,
 532	 * especially when the transaction size is unknown. This is the case,
  533	 * e.g. with serial drivers like amba-pl011.c. For reception it sets up
  534	 * a transaction size of 4K, and if fewer bytes are received it
 535	 * pauses DMA and reads out data received via DMA as well as those left
 536	 * in the Rx FIFO. For this to work with the RAM side using burst
 537	 * transfers we enable the SBE bit and terminate the transfer in our
 538	 * .device_pause handler.
 539	 */
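	/*
	 * Illustrative example: with 16-byte RAM-side units (DDS encoding 4)
	 * and a 4-byte device (SDS encoding 2), the transaction size must be
	 * a multiple of 16 bytes to satisfy both sides.
	 */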
 540	mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
 541
 542	switch (direction) {
 543	case DMA_DEV_TO_MEM:
 544		can_burst = chan->slave_src_width >= 3;
 545		slave_xfer = min(mem_xfer, can_burst ?
 546				 chan->slave_src_burst : chan->slave_src_width);
 547		/*
 548		 * Is the slave narrower than 64 bits, i.e. isn't using the full
 549		 * bus width and cannot use bursts?
 550		 */
 551		if (mem_xfer > chan->slave_src_burst && !can_burst)
 552			mem_xfer = chan->slave_src_burst;
 553		/* Device-to-RAM DMA is unreliable without REQD set */
 554		hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) |
 555			(NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD |
 556			NBPF_CHAN_CFG_SBE;
 557		break;
 558
 559	case DMA_MEM_TO_DEV:
 560		slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ?
 561				 chan->slave_dst_burst : chan->slave_dst_width);
 562		hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
 563			(NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD;
 564		break;
 565
 566	case DMA_MEM_TO_MEM:
 567		hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM |
 568			(NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
 569			(NBPF_CHAN_CFG_DDS & (mem_xfer << 16));
 570		break;
 571
 572	default:
 573		return -EINVAL;
 574	}
 575
 576	hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) |
 577		NBPF_CHAN_CFG_DMS;
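
	/*
	 * Illustrative example: a DMA_DEV_TO_MEM transfer using 4-byte units
	 * on both sides (mem_xfer = slave_xfer = 2) ends up with
	 * config = NBPF_CHAN_CFG_SAD | (2 << 16) | (2 << 12) |
	 *	    NBPF_CHAN_CFG_REQD | NBPF_CHAN_CFG_SBE | NBPF_CHAN_CFG_DMS,
	 * plus the DMARQ bits from chan->dmarq_cfg and, unless this is the
	 * last segment, NBPF_CHAN_CFG_DEM.
	 */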
 578
 579	dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
 580		__func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
 581		hwdesc->config, size, &src, &dst);
 582
 583	dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
 584				   DMA_TO_DEVICE);
 585
 586	return 0;
 587}
 588
 589static size_t nbpf_bytes_left(struct nbpf_channel *chan)
 590{
 591	return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE);
 592}
 593
 594static void nbpf_configure(struct nbpf_device *nbpf)
 595{
 596	nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
 597}
 598
 599/*		Generic part			*/
 600
 601/* DMA ENGINE functions */
 602static void nbpf_issue_pending(struct dma_chan *dchan)
 603{
 604	struct nbpf_channel *chan = nbpf_to_chan(dchan);
 605	unsigned long flags;
 606
 607	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
 608
 609	spin_lock_irqsave(&chan->lock, flags);
 610	if (list_empty(&chan->queued))
 611		goto unlock;
 612
 613	list_splice_tail_init(&chan->queued, &chan->active);
 614
 615	if (!chan->running) {
 616		struct nbpf_desc *desc = list_first_entry(&chan->active,
 617						struct nbpf_desc, node);
 618		if (!nbpf_start(desc))
 619			chan->running = desc;
 620	}
 621
 622unlock:
 623	spin_unlock_irqrestore(&chan->lock, flags);
 624}
 625
 626static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
 627		dma_cookie_t cookie, struct dma_tx_state *state)
 628{
 629	struct nbpf_channel *chan = nbpf_to_chan(dchan);
 630	enum dma_status status = dma_cookie_status(dchan, cookie, state);
 631
 632	if (state) {
 633		dma_cookie_t running;
 634		unsigned long flags;
 635
 636		spin_lock_irqsave(&chan->lock, flags);
 637		running = chan->running ? chan->running->async_tx.cookie : -EINVAL;
 638
 639		if (cookie == running) {
 640			state->residue = nbpf_bytes_left(chan);
 641			dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__,
 642				state->residue);
 643		} else if (status == DMA_IN_PROGRESS) {
 644			struct nbpf_desc *desc;
 645			bool found = false;
 646
 647			list_for_each_entry(desc, &chan->active, node)
 648				if (desc->async_tx.cookie == cookie) {
 649					found = true;
 650					break;
 651				}
 652
 653			if (!found)
 654				list_for_each_entry(desc, &chan->queued, node)
 655					if (desc->async_tx.cookie == cookie) {
 656						found = true;
 657						break;
 658
 659					}
 660
 661			state->residue = found ? desc->length : 0;
 662		}
 663
 664		spin_unlock_irqrestore(&chan->lock, flags);
 665	}
 666
 667	if (chan->paused)
 668		status = DMA_PAUSED;
 669
 670	return status;
 671}
 672
 673static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
 674{
 675	struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
 676	struct nbpf_channel *chan = desc->chan;
 677	unsigned long flags;
 678	dma_cookie_t cookie;
 679
 680	spin_lock_irqsave(&chan->lock, flags);
 681	cookie = dma_cookie_assign(tx);
 682	list_add_tail(&desc->node, &chan->queued);
 683	spin_unlock_irqrestore(&chan->lock, flags);
 684
 685	dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);
 686
 687	return cookie;
 688}
 689
 690static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
 691{
 692	struct dma_chan *dchan = &chan->dma_chan;
 693	struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 694	struct nbpf_link_desc *ldesc;
 695	struct nbpf_link_reg *hwdesc;
 696	struct nbpf_desc *desc;
 697	LIST_HEAD(head);
 698	LIST_HEAD(lhead);
 699	int i;
 700	struct device *dev = dchan->device->dev;
 701
 702	if (!dpage)
 703		return -ENOMEM;
 704
 705	dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n",
 706		__func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));
 707
 708	for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
 709	     i < ARRAY_SIZE(dpage->ldesc);
 710	     i++, ldesc++, hwdesc++) {
 711		ldesc->hwdesc = hwdesc;
 712		list_add_tail(&ldesc->node, &lhead);
 713		ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
 714					hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
 715
 716		dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
 717			hwdesc, &ldesc->hwdesc_dma_addr);
 718	}
 719
 720	for (i = 0, desc = dpage->desc;
 721	     i < ARRAY_SIZE(dpage->desc);
 722	     i++, desc++) {
 723		dma_async_tx_descriptor_init(&desc->async_tx, dchan);
 724		desc->async_tx.tx_submit = nbpf_tx_submit;
 725		desc->chan = chan;
 726		INIT_LIST_HEAD(&desc->sg);
 727		list_add_tail(&desc->node, &head);
 728	}
 729
 730	/*
  731	 * This function cannot be called from interrupt context, so there is
  732	 * no need to save flags
 733	 */
 734	spin_lock_irq(&chan->lock);
 735	list_splice_tail(&lhead, &chan->free_links);
 736	list_splice_tail(&head, &chan->free);
 737	list_add(&dpage->node, &chan->desc_page);
 738	spin_unlock_irq(&chan->lock);
 739
 740	return ARRAY_SIZE(dpage->desc);
 741}
 742
 743static void nbpf_desc_put(struct nbpf_desc *desc)
 744{
 745	struct nbpf_channel *chan = desc->chan;
 746	struct nbpf_link_desc *ldesc, *tmp;
 747	unsigned long flags;
 748
 749	spin_lock_irqsave(&chan->lock, flags);
 750	list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
 751		list_move(&ldesc->node, &chan->free_links);
 752
 753	list_add(&desc->node, &chan->free);
 754	spin_unlock_irqrestore(&chan->lock, flags);
 755}
 756
 757static void nbpf_scan_acked(struct nbpf_channel *chan)
 758{
 759	struct nbpf_desc *desc, *tmp;
 760	unsigned long flags;
 761	LIST_HEAD(head);
 762
 763	spin_lock_irqsave(&chan->lock, flags);
 764	list_for_each_entry_safe(desc, tmp, &chan->done, node)
 765		if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
 766			list_move(&desc->node, &head);
 767			desc->user_wait = false;
 768		}
 769	spin_unlock_irqrestore(&chan->lock, flags);
 770
 771	list_for_each_entry_safe(desc, tmp, &head, node) {
 772		list_del(&desc->node);
 773		nbpf_desc_put(desc);
 774	}
 775}
 776
 777/*
  778 * We have to allocate descriptors with the channel lock dropped. This
  779 * means that buffers can already be taken by the time we re-acquire the
  780 * lock, so we have to re-check after re-acquiring it and possibly retry,
  781 * if buffers are gone again.
 782 */
 783static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
 784{
 785	struct nbpf_desc *desc = NULL;
 786	struct nbpf_link_desc *ldesc, *prev = NULL;
 787
 788	nbpf_scan_acked(chan);
 789
 790	spin_lock_irq(&chan->lock);
 791
 792	do {
 793		int i = 0, ret;
 794
 795		if (list_empty(&chan->free)) {
 796			/* No more free descriptors */
 797			spin_unlock_irq(&chan->lock);
 798			ret = nbpf_desc_page_alloc(chan);
 799			if (ret < 0)
 800				return NULL;
 801			spin_lock_irq(&chan->lock);
 802			continue;
 803		}
 804		desc = list_first_entry(&chan->free, struct nbpf_desc, node);
 805		list_del(&desc->node);
 806
 807		do {
 808			if (list_empty(&chan->free_links)) {
 809				/* No more free link descriptors */
 810				spin_unlock_irq(&chan->lock);
 811				ret = nbpf_desc_page_alloc(chan);
 812				if (ret < 0) {
 813					nbpf_desc_put(desc);
 814					return NULL;
 815				}
 816				spin_lock_irq(&chan->lock);
 817				continue;
 818			}
 819
 820			ldesc = list_first_entry(&chan->free_links,
 821						 struct nbpf_link_desc, node);
 822			ldesc->desc = desc;
 823			if (prev)
 824				prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr;
 825
 826			prev = ldesc;
 827			list_move_tail(&ldesc->node, &desc->sg);
 828
 829			i++;
 830		} while (i < len);
 831	} while (!desc);
 832
 833	prev->hwdesc->next = 0;
 834
 835	spin_unlock_irq(&chan->lock);
 836
 837	return desc;
 838}
 839
 840static void nbpf_chan_idle(struct nbpf_channel *chan)
 841{
 842	struct nbpf_desc *desc, *tmp;
 843	unsigned long flags;
 844	LIST_HEAD(head);
 845
 846	spin_lock_irqsave(&chan->lock, flags);
 847
 848	list_splice_init(&chan->done, &head);
 849	list_splice_init(&chan->active, &head);
 850	list_splice_init(&chan->queued, &head);
 851
 852	chan->running = NULL;
 853
 854	spin_unlock_irqrestore(&chan->lock, flags);
 855
 856	list_for_each_entry_safe(desc, tmp, &head, node) {
 857		dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
 858			__func__, desc, desc->async_tx.cookie);
 859		list_del(&desc->node);
 860		nbpf_desc_put(desc);
 861	}
 862}
 863
 864static int nbpf_pause(struct dma_chan *dchan)
 865{
 866	struct nbpf_channel *chan = nbpf_to_chan(dchan);
 867
 868	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
 869
 870	chan->paused = true;
 871	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
 872	/* See comment in nbpf_prep_one() */
 873	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
 874
 875	return 0;
 876}
 877
 878static int nbpf_terminate_all(struct dma_chan *dchan)
 879{
 880	struct nbpf_channel *chan = nbpf_to_chan(dchan);
 881
 882	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
 883	dev_dbg(dchan->device->dev, "Terminating\n");
 884
 885	nbpf_chan_halt(chan);
 886	nbpf_chan_idle(chan);
 887
 888	return 0;
 889}
 890
 891static int nbpf_config(struct dma_chan *dchan,
 892		       struct dma_slave_config *config)
 893{
 894	struct nbpf_channel *chan = nbpf_to_chan(dchan);
 895
 896	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
 897
 898	/*
 899	 * We could check config->slave_id to match chan->terminal here,
 900	 * but with DT they would be coming from the same source, so
  901	 * such a check would be superfluous.
 902	 */
 903
 904	chan->slave_dst_addr = config->dst_addr;
 905	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
 906					       config->dst_addr_width, 1);
 907	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
 908					       config->dst_addr_width,
 909					       config->dst_maxburst);
 910	chan->slave_src_addr = config->src_addr;
 911	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
 912					       config->src_addr_width, 1);
 913	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
 914					       config->src_addr_width,
 915					       config->src_maxburst);
 916
 917	return 0;
 918}
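
/*
 * Illustrative example: dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES with
 * dst_maxburst = 16 stores the hardware encoding 2 in slave_dst_width and
 * (with a 16-byte buffer) 6 in slave_dst_burst, cf. the nbpf_xfer_size()
 * example above.
 */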
 919
 920static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
 921		struct scatterlist *src_sg, struct scatterlist *dst_sg,
 922		size_t len, enum dma_transfer_direction direction,
 923		unsigned long flags)
 924{
 925	struct nbpf_link_desc *ldesc;
 926	struct scatterlist *mem_sg;
 927	struct nbpf_desc *desc;
 928	bool inc_src, inc_dst;
 929	size_t data_len = 0;
 930	int i = 0;
 931
 932	switch (direction) {
 933	case DMA_DEV_TO_MEM:
 934		mem_sg = dst_sg;
 935		inc_src = false;
 936		inc_dst = true;
 937		break;
 938
 939	case DMA_MEM_TO_DEV:
 940		mem_sg = src_sg;
 941		inc_src = true;
 942		inc_dst = false;
 943		break;
 944
 945	default:
 946	case DMA_MEM_TO_MEM:
 947		mem_sg = src_sg;
 948		inc_src = true;
 949		inc_dst = true;
 950	}
 951
 952	desc = nbpf_desc_get(chan, len);
 953	if (!desc)
 954		return NULL;
 955
 956	desc->async_tx.flags = flags;
 957	desc->async_tx.cookie = -EBUSY;
 958	desc->user_wait = false;
 959
 960	/*
 961	 * This is a private descriptor list, and we own the descriptor. No need
 962	 * to lock.
 963	 */
 964	list_for_each_entry(ldesc, &desc->sg, node) {
 965		int ret = nbpf_prep_one(ldesc, direction,
 966					sg_dma_address(src_sg),
 967					sg_dma_address(dst_sg),
 968					sg_dma_len(mem_sg),
 969					i == len - 1);
 970		if (ret < 0) {
 971			nbpf_desc_put(desc);
 972			return NULL;
 973		}
 974		data_len += sg_dma_len(mem_sg);
 975		if (inc_src)
 976			src_sg = sg_next(src_sg);
 977		if (inc_dst)
 978			dst_sg = sg_next(dst_sg);
 979		mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg;
 980		i++;
 981	}
 982
 983	desc->length = data_len;
 984
 985	/* The user has to return the descriptor to us ASAP via .tx_submit() */
 986	return &desc->async_tx;
 987}
 988
 989static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
 990	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
 991	size_t len, unsigned long flags)
 992{
 993	struct nbpf_channel *chan = nbpf_to_chan(dchan);
 994	struct scatterlist dst_sg;
 995	struct scatterlist src_sg;
 996
 997	sg_init_table(&dst_sg, 1);
 998	sg_init_table(&src_sg, 1);
 999
1000	sg_dma_address(&dst_sg) = dst;
1001	sg_dma_address(&src_sg) = src;
1002
1003	sg_dma_len(&dst_sg) = len;
1004	sg_dma_len(&src_sg) = len;
1005
1006	dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n",
1007		__func__, len, &src, &dst);
1008
1009	return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
1010			    DMA_MEM_TO_MEM, flags);
1011}
1012
1013static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
1014	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1015	enum dma_transfer_direction direction, unsigned long flags, void *context)
1016{
1017	struct nbpf_channel *chan = nbpf_to_chan(dchan);
1018	struct scatterlist slave_sg;
1019
1020	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
1021
1022	sg_init_table(&slave_sg, 1);
1023
1024	switch (direction) {
1025	case DMA_MEM_TO_DEV:
1026		sg_dma_address(&slave_sg) = chan->slave_dst_addr;
1027		return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
1028				    direction, flags);
1029
1030	case DMA_DEV_TO_MEM:
1031		sg_dma_address(&slave_sg) = chan->slave_src_addr;
1032		return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
1033				    direction, flags);
1034
1035	default:
1036		return NULL;
1037	}
1038}
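
/*
 * Typical slave DMA client flow (an illustrative sketch using the generic
 * dmaengine client API, not code from this driver; "fifo_phys_addr", "sgl"
 * and "sg_len" are hypothetical, error handling is omitted):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 16,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(desc);		(reaches nbpf_tx_submit())
 *	dma_async_issue_pending(chan);	(reaches nbpf_issue_pending())
 */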
1039
1040static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
1041{
1042	struct nbpf_channel *chan = nbpf_to_chan(dchan);
1043	int ret;
1044
1045	INIT_LIST_HEAD(&chan->free);
1046	INIT_LIST_HEAD(&chan->free_links);
1047	INIT_LIST_HEAD(&chan->queued);
1048	INIT_LIST_HEAD(&chan->active);
1049	INIT_LIST_HEAD(&chan->done);
1050
1051	ret = nbpf_desc_page_alloc(chan);
1052	if (ret < 0)
1053		return ret;
1054
1055	dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__,
1056		chan->terminal);
1057
1058	nbpf_chan_configure(chan);
1059
1060	return ret;
1061}
1062
1063static void nbpf_free_chan_resources(struct dma_chan *dchan)
1064{
1065	struct nbpf_channel *chan = nbpf_to_chan(dchan);
1066	struct nbpf_desc_page *dpage, *tmp;
1067
1068	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
1069
1070	nbpf_chan_halt(chan);
1071	nbpf_chan_idle(chan);
 1072	/* Clean up in case a channel is re-used for MEMCPY after slave DMA */
1073	nbpf_chan_prepare_default(chan);
1074
1075	list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
1076		struct nbpf_link_desc *ldesc;
1077		int i;
1078		list_del(&dpage->node);
1079		for (i = 0, ldesc = dpage->ldesc;
1080		     i < ARRAY_SIZE(dpage->ldesc);
1081		     i++, ldesc++)
1082			dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
1083					 sizeof(*ldesc->hwdesc), DMA_TO_DEVICE);
1084		free_page((unsigned long)dpage);
1085	}
1086}
1087
1088static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
1089				      struct of_dma *ofdma)
1090{
1091	struct nbpf_device *nbpf = ofdma->of_dma_data;
1092	struct dma_chan *dchan;
1093	struct nbpf_channel *chan;
1094
1095	if (dma_spec->args_count != 2)
1096		return NULL;
1097
1098	dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
1099	if (!dchan)
1100		return NULL;
1101
1102	dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__,
1103		dma_spec->np);
1104
1105	chan = nbpf_to_chan(dchan);
1106
1107	chan->terminal = dma_spec->args[0];
1108	chan->flags = dma_spec->args[1];
1109
1110	nbpf_chan_prepare(chan);
1111	nbpf_chan_configure(chan);
1112
1113	return dchan;
1114}
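
/*
 * Device tree consumer example (illustrative): with #dma-cells = <2> the
 * first cell selects the DMA terminal and the second carries the
 * NBPF_SLAVE_RQ_* flags from <dt-bindings/dma/nbpfaxi.h>, e.g.:
 *
 *	dmas = <&dmac 4 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>;
 *	dma-names = "rx";
 */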
1115
1116static void nbpf_chan_tasklet(struct tasklet_struct *t)
1117{
1118	struct nbpf_channel *chan = from_tasklet(chan, t, tasklet);
1119	struct nbpf_desc *desc, *tmp;
1120	struct dmaengine_desc_callback cb;
1121
1122	while (!list_empty(&chan->done)) {
1123		bool found = false, must_put, recycling = false;
1124
1125		spin_lock_irq(&chan->lock);
1126
1127		list_for_each_entry_safe(desc, tmp, &chan->done, node) {
1128			if (!desc->user_wait) {
1129				/* Newly completed descriptor, have to process */
1130				found = true;
1131				break;
1132			} else if (async_tx_test_ack(&desc->async_tx)) {
1133				/*
1134				 * This descriptor was waiting for a user ACK,
1135				 * it can be recycled now.
1136				 */
1137				list_del(&desc->node);
1138				spin_unlock_irq(&chan->lock);
1139				nbpf_desc_put(desc);
1140				recycling = true;
1141				break;
1142			}
1143		}
1144
1145		if (recycling)
1146			continue;
1147
1148		if (!found) {
1149			/* This can happen if TERMINATE_ALL has been called */
1150			spin_unlock_irq(&chan->lock);
1151			break;
1152		}
1153
1154		dma_cookie_complete(&desc->async_tx);
1155
1156		/*
 1157		 * Once the lock is released we cannot dereference desc, as it
 1158		 * may still be on the "done" list and be recycled under us
1159		 */
1160		if (async_tx_test_ack(&desc->async_tx)) {
1161			list_del(&desc->node);
1162			must_put = true;
1163		} else {
1164			desc->user_wait = true;
1165			must_put = false;
1166		}
1167
1168		dmaengine_desc_get_callback(&desc->async_tx, &cb);
1169
1170		/* ack and callback completed descriptor */
1171		spin_unlock_irq(&chan->lock);
1172
1173		dmaengine_desc_callback_invoke(&cb, NULL);
1174
1175		if (must_put)
1176			nbpf_desc_put(desc);
1177	}
1178}
1179
1180static irqreturn_t nbpf_chan_irq(int irq, void *dev)
1181{
1182	struct nbpf_channel *chan = dev;
1183	bool done = nbpf_status_get(chan);
1184	struct nbpf_desc *desc;
1185	irqreturn_t ret;
1186	bool bh = false;
1187
1188	if (!done)
1189		return IRQ_NONE;
1190
1191	nbpf_status_ack(chan);
1192
1193	dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);
1194
1195	spin_lock(&chan->lock);
1196	desc = chan->running;
1197	if (WARN_ON(!desc)) {
1198		ret = IRQ_NONE;
1199		goto unlock;
1200	} else {
1201		ret = IRQ_HANDLED;
1202		bh = true;
1203	}
1204
1205	list_move_tail(&desc->node, &chan->done);
1206	chan->running = NULL;
1207
1208	if (!list_empty(&chan->active)) {
1209		desc = list_first_entry(&chan->active,
1210					struct nbpf_desc, node);
1211		if (!nbpf_start(desc))
1212			chan->running = desc;
1213	}
1214
1215unlock:
1216	spin_unlock(&chan->lock);
1217
1218	if (bh)
1219		tasklet_schedule(&chan->tasklet);
1220
1221	return ret;
1222}
1223
1224static irqreturn_t nbpf_err_irq(int irq, void *dev)
1225{
1226	struct nbpf_device *nbpf = dev;
1227	u32 error = nbpf_error_get(nbpf);
1228
1229	dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);
1230
1231	if (!error)
1232		return IRQ_NONE;
1233
1234	do {
1235		struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
1236		/* On error: abort all queued transfers, no callback */
1237		nbpf_error_clear(chan);
1238		nbpf_chan_idle(chan);
1239		error = nbpf_error_get(nbpf);
1240	} while (error);
1241
1242	return IRQ_HANDLED;
1243}
1244
1245static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
1246{
1247	struct dma_device *dma_dev = &nbpf->dma_dev;
1248	struct nbpf_channel *chan = nbpf->chan + n;
1249	int ret;
1250
1251	chan->nbpf = nbpf;
1252	chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
1253	INIT_LIST_HEAD(&chan->desc_page);
1254	spin_lock_init(&chan->lock);
1255	chan->dma_chan.device = dma_dev;
1256	dma_cookie_init(&chan->dma_chan);
1257	nbpf_chan_prepare_default(chan);
1258
1259	dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);
1260
1261	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
1262
1263	tasklet_setup(&chan->tasklet, nbpf_chan_tasklet);
1264	ret = devm_request_irq(dma_dev->dev, chan->irq,
1265			nbpf_chan_irq, IRQF_SHARED,
1266			chan->name, chan);
1267	if (ret < 0)
1268		return ret;
1269
1270	/* Add the channel to DMA device channel list */
1271	list_add_tail(&chan->dma_chan.device_node,
1272		      &dma_dev->channels);
1273
1274	return 0;
1275}
1276
1277static const struct of_device_id nbpf_match[] = {
1278	{.compatible = "renesas,nbpfaxi64dmac1b4",	.data = &nbpf_cfg[NBPF1B4]},
1279	{.compatible = "renesas,nbpfaxi64dmac1b8",	.data = &nbpf_cfg[NBPF1B8]},
1280	{.compatible = "renesas,nbpfaxi64dmac1b16",	.data = &nbpf_cfg[NBPF1B16]},
1281	{.compatible = "renesas,nbpfaxi64dmac4b4",	.data = &nbpf_cfg[NBPF4B4]},
1282	{.compatible = "renesas,nbpfaxi64dmac4b8",	.data = &nbpf_cfg[NBPF4B8]},
1283	{.compatible = "renesas,nbpfaxi64dmac4b16",	.data = &nbpf_cfg[NBPF4B16]},
1284	{.compatible = "renesas,nbpfaxi64dmac8b4",	.data = &nbpf_cfg[NBPF8B4]},
1285	{.compatible = "renesas,nbpfaxi64dmac8b8",	.data = &nbpf_cfg[NBPF8B8]},
1286	{.compatible = "renesas,nbpfaxi64dmac8b16",	.data = &nbpf_cfg[NBPF8B16]},
1287	{}
1288};
1289MODULE_DEVICE_TABLE(of, nbpf_match);
1290
1291static int nbpf_probe(struct platform_device *pdev)
1292{
1293	struct device *dev = &pdev->dev;
1294	struct device_node *np = dev->of_node;
1295	struct nbpf_device *nbpf;
1296	struct dma_device *dma_dev;
1297	struct resource *iomem, *irq_res;
1298	const struct nbpf_config *cfg;
1299	int num_channels;
1300	int ret, irq, eirq, i;
1301	int irqbuf[9] /* maximum 8 channels + error IRQ */;
1302	unsigned int irqs = 0;
1303
1304	BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
1305
1306	/* DT only */
1307	if (!np)
1308		return -ENODEV;
1309
1310	cfg = of_device_get_match_data(dev);
1311	num_channels = cfg->num_channels;
1312
1313	nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels),
1314			    GFP_KERNEL);
1315	if (!nbpf)
1316		return -ENOMEM;
1317
1318	dma_dev = &nbpf->dma_dev;
1319	dma_dev->dev = dev;
1320
1321	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1322	nbpf->base = devm_ioremap_resource(dev, iomem);
1323	if (IS_ERR(nbpf->base))
1324		return PTR_ERR(nbpf->base);
1325
1326	nbpf->clk = devm_clk_get(dev, NULL);
1327	if (IS_ERR(nbpf->clk))
1328		return PTR_ERR(nbpf->clk);
1329
1330	of_property_read_u32(np, "max-burst-mem-read",
1331			     &nbpf->max_burst_mem_read);
1332	of_property_read_u32(np, "max-burst-mem-write",
1333			     &nbpf->max_burst_mem_write);
1334
1335	nbpf->config = cfg;
1336
1337	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
1338		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1339		if (!irq_res)
1340			break;
1341
1342		for (irq = irq_res->start; irq <= irq_res->end;
1343		     irq++, irqs++)
1344			irqbuf[irqs] = irq;
1345	}
1346
1347	/*
1348	 * 3 IRQ resource schemes are supported:
1349	 * 1. 1 shared IRQ for error and all channels
1350	 * 2. 2 IRQs: one for error and one shared for all channels
 1351	 * 3. 1 IRQ for error and one IRQ per channel
1352	 */
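	/* E.g. a 4-channel DMAC would thus use 1, 2 or 5 IRQs respectively. */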
1353	if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
1354		return -ENXIO;
1355
1356	if (irqs == 1) {
1357		eirq = irqbuf[0];
1358
 1359		for (i = 0; i < num_channels; i++)
1360			nbpf->chan[i].irq = irqbuf[0];
1361	} else {
1362		eirq = platform_get_irq_byname(pdev, "error");
1363		if (eirq < 0)
1364			return eirq;
1365
1366		if (irqs == num_channels + 1) {
1367			struct nbpf_channel *chan;
1368
1369			for (i = 0, chan = nbpf->chan; i <= num_channels;
1370			     i++, chan++) {
1371				/* Skip the error IRQ */
1372				if (irqbuf[i] == eirq)
1373					i++;
1374				chan->irq = irqbuf[i];
1375			}
1376
1377			if (chan != nbpf->chan + num_channels)
1378				return -EINVAL;
1379		} else {
1380			/* 2 IRQs and more than one channel */
1381			if (irqbuf[0] == eirq)
1382				irq = irqbuf[1];
1383			else
1384				irq = irqbuf[0];
1385
 1386			for (i = 0; i < num_channels; i++)
1387				nbpf->chan[i].irq = irq;
1388		}
1389	}
1390
1391	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
1392			       IRQF_SHARED, "dma error", nbpf);
1393	if (ret < 0)
1394		return ret;
1395	nbpf->eirq = eirq;
1396
1397	INIT_LIST_HEAD(&dma_dev->channels);
1398
1399	/* Create DMA Channel */
1400	for (i = 0; i < num_channels; i++) {
1401		ret = nbpf_chan_probe(nbpf, i);
1402		if (ret < 0)
1403			return ret;
1404	}
1405
1406	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1407	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1408	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1409
1410	/* Common and MEMCPY operations */
1411	dma_dev->device_alloc_chan_resources
1412		= nbpf_alloc_chan_resources;
1413	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
1414	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
1415	dma_dev->device_tx_status = nbpf_tx_status;
1416	dma_dev->device_issue_pending = nbpf_issue_pending;
1417
1418	/*
1419	 * If we drop support for unaligned MEMCPY buffer addresses and / or
1420	 * lengths by setting
1421	 * dma_dev->copy_align = 4;
1422	 * then we can set transfer length to 4 bytes in nbpf_prep_one() for
1423	 * DMA_MEM_TO_MEM
1424	 */
1425
1426	/* Compulsory for DMA_SLAVE fields */
1427	dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
1428	dma_dev->device_config = nbpf_config;
1429	dma_dev->device_pause = nbpf_pause;
1430	dma_dev->device_terminate_all = nbpf_terminate_all;
1431
1432	dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
1433	dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
1434	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1435
1436	platform_set_drvdata(pdev, nbpf);
1437
1438	ret = clk_prepare_enable(nbpf->clk);
1439	if (ret < 0)
1440		return ret;
1441
1442	nbpf_configure(nbpf);
1443
1444	ret = dma_async_device_register(dma_dev);
1445	if (ret < 0)
1446		goto e_clk_off;
1447
1448	ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
1449	if (ret < 0)
1450		goto e_dma_dev_unreg;
1451
1452	return 0;
1453
1454e_dma_dev_unreg:
1455	dma_async_device_unregister(dma_dev);
1456e_clk_off:
1457	clk_disable_unprepare(nbpf->clk);
1458
1459	return ret;
1460}
1461
1462static int nbpf_remove(struct platform_device *pdev)
1463{
1464	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
1465	int i;
1466
1467	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
1468
1469	for (i = 0; i < nbpf->config->num_channels; i++) {
1470		struct nbpf_channel *chan = nbpf->chan + i;
1471
1472		devm_free_irq(&pdev->dev, chan->irq, chan);
1473
1474		tasklet_kill(&chan->tasklet);
1475	}
1476
1477	of_dma_controller_free(pdev->dev.of_node);
1478	dma_async_device_unregister(&nbpf->dma_dev);
1479	clk_disable_unprepare(nbpf->clk);
1480
1481	return 0;
1482}
1483
1484static const struct platform_device_id nbpf_ids[] = {
1485	{"nbpfaxi64dmac1b4",	(kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
1486	{"nbpfaxi64dmac1b8",	(kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
1487	{"nbpfaxi64dmac1b16",	(kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
1488	{"nbpfaxi64dmac4b4",	(kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
1489	{"nbpfaxi64dmac4b8",	(kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
1490	{"nbpfaxi64dmac4b16",	(kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
1491	{"nbpfaxi64dmac8b4",	(kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
1492	{"nbpfaxi64dmac8b8",	(kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
1493	{"nbpfaxi64dmac8b16",	(kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
1494	{},
1495};
1496MODULE_DEVICE_TABLE(platform, nbpf_ids);
1497
1498#ifdef CONFIG_PM
1499static int nbpf_runtime_suspend(struct device *dev)
1500{
1501	struct nbpf_device *nbpf = dev_get_drvdata(dev);
1502	clk_disable_unprepare(nbpf->clk);
1503	return 0;
1504}
1505
1506static int nbpf_runtime_resume(struct device *dev)
1507{
1508	struct nbpf_device *nbpf = dev_get_drvdata(dev);
1509	return clk_prepare_enable(nbpf->clk);
1510}
1511#endif
1512
1513static const struct dev_pm_ops nbpf_pm_ops = {
1514	SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
1515};
1516
1517static struct platform_driver nbpf_driver = {
1518	.driver = {
1519		.name = "dma-nbpf",
1520		.of_match_table = nbpf_match,
1521		.pm = &nbpf_pm_ops,
1522	},
1523	.id_table = nbpf_ids,
1524	.probe = nbpf_probe,
1525	.remove = nbpf_remove,
1526};
1527
1528module_platform_driver(nbpf_driver);
1529
1530MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1531MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
1532MODULE_LICENSE("GPL v2");