   1// SPDX-License-Identifier: GPL-2.0-only
   2// Copyright (C) 2017 Broadcom
   3
   4/*
   5 * Broadcom SBA RAID Driver
   6 *
   7 * The Broadcom stream buffer accelerator (SBA) provides offloading
   8 * capabilities for RAID operations. The SBA offload engine is accessible
   9 * via the Broadcom SoC specific ring manager. Two or more offload engines
  10 * can share the same ring manager, which is why the ring manager driver is
  11 * implemented as a mailbox controller driver and the offload engine
  12 * drivers are implemented as mailbox clients.
  13 *
  14 * Typically, the Broadcom SoC specific ring manager implements a large
  15 * number of hardware rings over one or more SBA hardware devices. By
  16 * design, the internal buffer size of an SBA hardware device is limited,
  17 * but all offload operations supported by SBA can be broken down into
  18 * multiple small-size requests and executed in parallel on multiple SBA
  19 * hardware devices to achieve high throughput.
  20 *
  21 * The Broadcom SBA RAID driver does not require any register programming;
  22 * it only submits requests to the SBA hardware device via mailbox channels.
  23 * This driver implements a DMA device with one DMA channel, using a single
  24 * mailbox channel provided by the Broadcom SoC specific ring manager driver.
  25 * To have more SBA DMA channels, we can create more SBA device nodes in the
  26 * Broadcom SoC specific DTS, based on the number of hardware rings supported
  27 * by the Broadcom SoC ring manager.
  28 */
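
    /*
     * For illustration only, a hypothetical DTS node for one SBA DMA channel
     * could look like the below; the node name and the mailbox specifier
     * cells are assumptions that depend on the SoC ring manager binding:
     *
     *	raid0: raid {
     *		compatible = "brcm,iproc-sba-v2";
     *		mboxes = <&ringmgr 0>;
     *	};
     */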
  29
  30#include <linux/bitops.h>
  31#include <linux/debugfs.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/dmaengine.h>
  34#include <linux/list.h>
  35#include <linux/mailbox_client.h>
  36#include <linux/mailbox/brcm-message.h>
  37#include <linux/module.h>
  38#include <linux/of_device.h>
  39#include <linux/slab.h>
  40#include <linux/raid/pq.h>
  41
  42#include "dmaengine.h"
  43
  44/* ====== Driver macros and defines ===== */
  45
  46#define SBA_TYPE_SHIFT					48
  47#define SBA_TYPE_MASK					GENMASK(1, 0)
  48#define SBA_TYPE_A					0x0
  49#define SBA_TYPE_B					0x2
  50#define SBA_TYPE_C					0x3
  51#define SBA_USER_DEF_SHIFT				32
  52#define SBA_USER_DEF_MASK				GENMASK(15, 0)
  53#define SBA_R_MDATA_SHIFT				24
  54#define SBA_R_MDATA_MASK				GENMASK(7, 0)
  55#define SBA_C_MDATA_MS_SHIFT				18
  56#define SBA_C_MDATA_MS_MASK				GENMASK(1, 0)
  57#define SBA_INT_SHIFT					17
  58#define SBA_INT_MASK					BIT(0)
  59#define SBA_RESP_SHIFT					16
  60#define SBA_RESP_MASK					BIT(0)
  61#define SBA_C_MDATA_SHIFT				8
  62#define SBA_C_MDATA_MASK				GENMASK(7, 0)
  63#define SBA_C_MDATA_BNUMx_SHIFT(__bnum)			(2 * (__bnum))
  64#define SBA_C_MDATA_BNUMx_MASK				GENMASK(1, 0)
  65#define SBA_C_MDATA_DNUM_SHIFT				5
  66#define SBA_C_MDATA_DNUM_MASK				GENMASK(4, 0)
  67#define SBA_C_MDATA_LS(__v)				((__v) & 0xff)
  68#define SBA_C_MDATA_MS(__v)				(((__v) >> 8) & 0x3)
  69#define SBA_CMD_SHIFT					0
  70#define SBA_CMD_MASK					GENMASK(3, 0)
  71#define SBA_CMD_ZERO_BUFFER				0x4
  72#define SBA_CMD_ZERO_ALL_BUFFERS			0x8
  73#define SBA_CMD_LOAD_BUFFER				0x9
  74#define SBA_CMD_XOR					0xa
  75#define SBA_CMD_GALOIS_XOR				0xb
  76#define SBA_CMD_WRITE_BUFFER				0xc
  77#define SBA_CMD_GALOIS					0xe
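
    /*
     * Putting the fields above together, a 64-bit SBA command word is laid
     * out as follows (bit positions follow the *_SHIFT/*_MASK defines):
     *   [49:48] type, [47:32] user def, [31:24] r_mdata,
     *   [19:18] c_mdata MS, [17] int, [16] resp, [15:8] c_mdata LS,
     *   [3:0] command
     */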
  78
  79#define SBA_MAX_REQ_PER_MBOX_CHANNEL			8192
  80#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL		8
  81
  82/* Driver helper macros */
  83#define to_sba_request(tx)		\
  84	container_of(tx, struct sba_request, tx)
  85#define to_sba_device(dchan)		\
  86	container_of(dchan, struct sba_device, dma_chan)
  87
  88/* ===== Driver data structures ===== */
  89
  90enum sba_request_flags {
  91	SBA_REQUEST_STATE_FREE		= 0x001,
  92	SBA_REQUEST_STATE_ALLOCED	= 0x002,
  93	SBA_REQUEST_STATE_PENDING	= 0x004,
  94	SBA_REQUEST_STATE_ACTIVE	= 0x008,
  95	SBA_REQUEST_STATE_ABORTED	= 0x010,
  96	SBA_REQUEST_STATE_MASK		= 0x0ff,
  97	SBA_REQUEST_FENCE		= 0x100,
  98};
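
    /*
     * Request lifecycle (see the _sba_*_request() helpers below):
     * FREE -> ALLOCED -> PENDING -> ACTIVE -> FREE on normal completion,
     * or ACTIVE -> ABORTED -> FREE when channel resources are cleaned up.
     */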
  99
 100struct sba_request {
 101	/* Global state */
 102	struct list_head node;
 103	struct sba_device *sba;
 104	u32 flags;
 105	/* Chained requests management */
 106	struct sba_request *first;
 107	struct list_head next;
 108	atomic_t next_pending_count;
 109	/* BRCM message data */
 110	struct brcm_message msg;
 111	struct dma_async_tx_descriptor tx;
 112	/* SBA commands */
 113	struct brcm_sba_command cmds[];
 114};
 115
 116enum sba_version {
 117	SBA_VER_1 = 0,
 118	SBA_VER_2
 119};
 120
 121struct sba_device {
 122	/* Underlying device */
 123	struct device *dev;
 124	/* DT configuration parameters */
 125	enum sba_version ver;
 126	/* Derived configuration parameters */
 127	u32 max_req;
 128	u32 hw_buf_size;
 129	u32 hw_resp_size;
 130	u32 max_pq_coefs;
 131	u32 max_pq_srcs;
 132	u32 max_cmd_per_req;
 133	u32 max_xor_srcs;
 134	u32 max_resp_pool_size;
 135	u32 max_cmds_pool_size;
 136	/* Mailbox client and mailbox channels */
 137	struct mbox_client client;
 138	struct mbox_chan *mchan;
 139	struct device *mbox_dev;
 140	/* DMA device and DMA channel */
 141	struct dma_device dma_dev;
 142	struct dma_chan dma_chan;
 143	/* DMA channel resources */
 144	void *resp_base;
 145	dma_addr_t resp_dma_base;
 146	void *cmds_base;
 147	dma_addr_t cmds_dma_base;
 148	spinlock_t reqs_lock;
 149	bool reqs_fence;
 150	struct list_head reqs_alloc_list;
 151	struct list_head reqs_pending_list;
 152	struct list_head reqs_active_list;
 153	struct list_head reqs_aborted_list;
 154	struct list_head reqs_free_list;
 155	/* DebugFS directory entries */
 156	struct dentry *root;
 157};
 158
 159/* ====== Command helper routines ===== */
 160
 161static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
 162{
 163	cmd &= ~((u64)mask << shift);
 164	cmd |= ((u64)(val & mask) << shift);
 165	return cmd;
 166}
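
    /*
     * Worked example: sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT,
     * SBA_TYPE_MASK) clears bits [49:48] of the command and sets them to
     * SBA_TYPE_B (0x2), i.e. it returns 0x0002000000000000.
     */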
 167
 168static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
 169{
 170	return b0 & SBA_C_MDATA_BNUMx_MASK;
 171}
 172
 173static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
 174{
 175	return b0 & SBA_C_MDATA_BNUMx_MASK;
 176}
 177
 178static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
 179{
 180	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
 181	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
 182}
 183
 184static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
 185{
 186	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
 187	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
 188	       ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
 189}
 190
 191/* ====== General helper routines ===== */
 192
 193static struct sba_request *sba_alloc_request(struct sba_device *sba)
 194{
 195	bool found = false;
 196	unsigned long flags;
 197	struct sba_request *req = NULL;
 198
 199	spin_lock_irqsave(&sba->reqs_lock, flags);
 200	list_for_each_entry(req, &sba->reqs_free_list, node) {
 201		if (async_tx_test_ack(&req->tx)) {
 202			list_move_tail(&req->node, &sba->reqs_alloc_list);
 203			found = true;
 204			break;
 205		}
 206	}
 207	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 208
 209	if (!found) {
 210		/*
 211		 * We have no more free requests, so we peek the
 212		 * mailbox channel hoping that a few active requests
 213		 * have completed, which would create more room for
 214		 * new requests.
 215		 */
 216		mbox_client_peek_data(sba->mchan);
 217		return NULL;
 218	}
 219
 220	req->flags = SBA_REQUEST_STATE_ALLOCED;
 221	req->first = req;
 222	INIT_LIST_HEAD(&req->next);
 223	atomic_set(&req->next_pending_count, 1);
 224
 225	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
 226	async_tx_ack(&req->tx);
 227
 228	return req;
 229}
 230
 231/* Note: Must be called with sba->reqs_lock held */
 232static void _sba_pending_request(struct sba_device *sba,
 233				 struct sba_request *req)
 234{
 235	lockdep_assert_held(&sba->reqs_lock);
 236	req->flags &= ~SBA_REQUEST_STATE_MASK;
 237	req->flags |= SBA_REQUEST_STATE_PENDING;
 238	list_move_tail(&req->node, &sba->reqs_pending_list);
 239	if (list_empty(&sba->reqs_active_list))
 240		sba->reqs_fence = false;
 241}
 242
 243/* Note: Must be called with sba->reqs_lock held */
 244static bool _sba_active_request(struct sba_device *sba,
 245				struct sba_request *req)
 246{
 247	lockdep_assert_held(&sba->reqs_lock);
 248	if (list_empty(&sba->reqs_active_list))
 249		sba->reqs_fence = false;
 250	if (sba->reqs_fence)
 251		return false;
 252	req->flags &= ~SBA_REQUEST_STATE_MASK;
 253	req->flags |= SBA_REQUEST_STATE_ACTIVE;
 254	list_move_tail(&req->node, &sba->reqs_active_list);
 255	if (req->flags & SBA_REQUEST_FENCE)
 256		sba->reqs_fence = true;
 257	return true;
 258}
 259
 260/* Note: Must be called with sba->reqs_lock held */
 261static void _sba_abort_request(struct sba_device *sba,
 262			       struct sba_request *req)
 263{
 264	lockdep_assert_held(&sba->reqs_lock);
 265	req->flags &= ~SBA_REQUEST_STATE_MASK;
 266	req->flags |= SBA_REQUEST_STATE_ABORTED;
 267	list_move_tail(&req->node, &sba->reqs_aborted_list);
 268	if (list_empty(&sba->reqs_active_list))
 269		sba->reqs_fence = false;
 270}
 271
 272/* Note: Must be called with sba->reqs_lock held */
 273static void _sba_free_request(struct sba_device *sba,
 274			      struct sba_request *req)
 275{
 276	lockdep_assert_held(&sba->reqs_lock);
 277	req->flags &= ~SBA_REQUEST_STATE_MASK;
 278	req->flags |= SBA_REQUEST_STATE_FREE;
 279	list_move_tail(&req->node, &sba->reqs_free_list);
 280	if (list_empty(&sba->reqs_active_list))
 281		sba->reqs_fence = false;
 282}
 283
 284static void sba_free_chained_requests(struct sba_request *req)
 285{
 286	unsigned long flags;
 287	struct sba_request *nreq;
 288	struct sba_device *sba = req->sba;
 289
 290	spin_lock_irqsave(&sba->reqs_lock, flags);
 291
 292	_sba_free_request(sba, req);
 293	list_for_each_entry(nreq, &req->next, next)
 294		_sba_free_request(sba, nreq);
 295
 296	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 297}
 298
 299static void sba_chain_request(struct sba_request *first,
 300			      struct sba_request *req)
 301{
 302	unsigned long flags;
 303	struct sba_device *sba = req->sba;
 304
 305	spin_lock_irqsave(&sba->reqs_lock, flags);
 306
 307	list_add_tail(&req->next, &first->next);
 308	req->first = first;
 309	atomic_inc(&first->next_pending_count);
 310
 311	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 312}
 313
 314static void sba_cleanup_nonpending_requests(struct sba_device *sba)
 315{
 316	unsigned long flags;
 317	struct sba_request *req, *req1;
 318
 319	spin_lock_irqsave(&sba->reqs_lock, flags);
 320
 321	/* Free up all allocated requests */
 322	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
 323		_sba_free_request(sba, req);
 324
 325	/* Set all active requests as aborted */
 326	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
 327		_sba_abort_request(sba, req);
 328
 329	/*
 330	 * Note: We expect that aborted requests will eventually be
 331	 * freed by sba_receive_message()
 332	 */
 333
 334	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 335}
 336
 337static void sba_cleanup_pending_requests(struct sba_device *sba)
 338{
 339	unsigned long flags;
 340	struct sba_request *req, *req1;
 341
 342	spin_lock_irqsave(&sba->reqs_lock, flags);
 343
 344	/* Free up all pending requests */
 345	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
 346		_sba_free_request(sba, req);
 347
 348	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 349}
 350
 351static int sba_send_mbox_request(struct sba_device *sba,
 352				 struct sba_request *req)
 353{
 354	int ret = 0;
 355
 356	/* Send message for the request */
 357	req->msg.error = 0;
 358	ret = mbox_send_message(sba->mchan, &req->msg);
 359	if (ret < 0) {
 360		dev_err(sba->dev, "send message failed with error %d", ret);
 361		return ret;
 362	}
 363
 364	/* Check error returned by mailbox controller */
 365	ret = req->msg.error;
 366	if (ret < 0) {
 367		dev_err(sba->dev, "message error %d", ret);
 368	}
 369
 370	/* Signal txdone for mailbox channel */
 371	mbox_client_txdone(sba->mchan, ret);
 372
 373	return ret;
 374}
 375
 376/* Note: Must be called with sba->reqs_lock held */
 377static void _sba_process_pending_requests(struct sba_device *sba)
 378{
 379	int ret;
 380	u32 count;
 381	struct sba_request *req;
 382
 383	/* Process a few pending requests */
 384	count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
 385	while (!list_empty(&sba->reqs_pending_list) && count) {
 386		/* Get the first pending request */
 387		req = list_first_entry(&sba->reqs_pending_list,
 388				       struct sba_request, node);
 389
 390		/* Try to make request active */
 391		if (!_sba_active_request(sba, req))
 392			break;
 393
 394		/* Send request to mailbox channel */
 395		ret = sba_send_mbox_request(sba, req);
 396		if (ret < 0) {
 397			_sba_pending_request(sba, req);
 398			break;
 399		}
 400
 401		count--;
 402	}
 403}
 404
 405static void sba_process_received_request(struct sba_device *sba,
 406					 struct sba_request *req)
 407{
 408	unsigned long flags;
 409	struct dma_async_tx_descriptor *tx;
 410	struct sba_request *nreq, *first = req->first;
 411
 412	/* Process only after all chained requests are received */
 413	if (!atomic_dec_return(&first->next_pending_count)) {
 414		tx = &first->tx;
 415
 416		WARN_ON(tx->cookie < 0);
 417		if (tx->cookie > 0) {
 418			spin_lock_irqsave(&sba->reqs_lock, flags);
 419			dma_cookie_complete(tx);
 420			spin_unlock_irqrestore(&sba->reqs_lock, flags);
 421			dmaengine_desc_get_callback_invoke(tx, NULL);
 422			dma_descriptor_unmap(tx);
 423			tx->callback = NULL;
 424			tx->callback_result = NULL;
 425		}
 426
 427		dma_run_dependencies(tx);
 428
 429		spin_lock_irqsave(&sba->reqs_lock, flags);
 430
 431		/* Free all requests chained to first request */
 432		list_for_each_entry(nreq, &first->next, next)
 433			_sba_free_request(sba, nreq);
 434		INIT_LIST_HEAD(&first->next);
 435
 436		/* Free the first request */
 437		_sba_free_request(sba, first);
 438
 439		/* Process pending requests */
 440		_sba_process_pending_requests(sba);
 441
 442		spin_unlock_irqrestore(&sba->reqs_lock, flags);
 443	}
 444}
 445
 446static void sba_write_stats_in_seqfile(struct sba_device *sba,
 447				       struct seq_file *file)
 448{
 449	unsigned long flags;
 450	struct sba_request *req;
 451	u32 free_count = 0, alloced_count = 0;
 452	u32 pending_count = 0, active_count = 0, aborted_count = 0;
 453
 454	spin_lock_irqsave(&sba->reqs_lock, flags);
 455
 456	list_for_each_entry(req, &sba->reqs_free_list, node)
 457		if (async_tx_test_ack(&req->tx))
 458			free_count++;
 459
 460	list_for_each_entry(req, &sba->reqs_alloc_list, node)
 461		alloced_count++;
 462
 463	list_for_each_entry(req, &sba->reqs_pending_list, node)
 464		pending_count++;
 465
 466	list_for_each_entry(req, &sba->reqs_active_list, node)
 467		active_count++;
 468
 469	list_for_each_entry(req, &sba->reqs_aborted_list, node)
 470		aborted_count++;
 471
 472	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 473
 474	seq_printf(file, "maximum requests   = %d\n", sba->max_req);
 475	seq_printf(file, "free requests      = %d\n", free_count);
 476	seq_printf(file, "alloced requests   = %d\n", alloced_count);
 477	seq_printf(file, "pending requests   = %d\n", pending_count);
 478	seq_printf(file, "active requests    = %d\n", active_count);
 479	seq_printf(file, "aborted requests   = %d\n", aborted_count);
 480}
 481
 482/* ====== DMAENGINE callbacks ===== */
 483
 484static void sba_free_chan_resources(struct dma_chan *dchan)
 485{
 486	/*
 487	 * Channel resources are pre-allocated, so we just free up
 488	 * whatever we can so that we can re-use the pre-allocated
 489	 * channel resources next time.
 490	 */
 491	sba_cleanup_nonpending_requests(to_sba_device(dchan));
 492}
 493
 494static int sba_device_terminate_all(struct dma_chan *dchan)
 495{
 496	/* Cleanup all pending requests */
 497	sba_cleanup_pending_requests(to_sba_device(dchan));
 498
 499	return 0;
 500}
 501
 502static void sba_issue_pending(struct dma_chan *dchan)
 503{
 504	unsigned long flags;
 505	struct sba_device *sba = to_sba_device(dchan);
 506
 507	/* Process pending requests */
 508	spin_lock_irqsave(&sba->reqs_lock, flags);
 509	_sba_process_pending_requests(sba);
 510	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 511}
 512
 513static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
 514{
 515	unsigned long flags;
 516	dma_cookie_t cookie;
 517	struct sba_device *sba;
 518	struct sba_request *req, *nreq;
 519
 520	if (unlikely(!tx))
 521		return -EINVAL;
 522
 523	sba = to_sba_device(tx->chan);
 524	req = to_sba_request(tx);
 525
 526	/* Assign cookie and mark all chained requests pending */
 527	spin_lock_irqsave(&sba->reqs_lock, flags);
 528	cookie = dma_cookie_assign(tx);
 529	_sba_pending_request(sba, req);
 530	list_for_each_entry(nreq, &req->next, next)
 531		_sba_pending_request(sba, nreq);
 532	spin_unlock_irqrestore(&sba->reqs_lock, flags);
 533
 534	return cookie;
 535}
 536
 537static enum dma_status sba_tx_status(struct dma_chan *dchan,
 538				     dma_cookie_t cookie,
 539				     struct dma_tx_state *txstate)
 540{
 541	enum dma_status ret;
 542	struct sba_device *sba = to_sba_device(dchan);
 543
 544	ret = dma_cookie_status(dchan, cookie, txstate);
 545	if (ret == DMA_COMPLETE)
 546		return ret;
 547
 548	mbox_client_peek_data(sba->mchan);
 549
 550	return dma_cookie_status(dchan, cookie, txstate);
 551}
 552
 553static void sba_fillup_interrupt_msg(struct sba_request *req,
 554				     struct brcm_sba_command *cmds,
 555				     struct brcm_message *msg)
 556{
 557	u64 cmd;
 558	u32 c_mdata;
 559	dma_addr_t resp_dma = req->tx.phys;
 560	struct brcm_sba_command *cmdsp = cmds;
 561
 562	/* Type-B command to load dummy data into buf0 */
 563	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
 564			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 565	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
 566			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 567	c_mdata = sba_cmd_load_c_mdata(0);
 568	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 569			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 570	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
 571			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 572	cmdsp->cmd = cmd;
 573	*cmdsp->cmd_dma = cpu_to_le64(cmd);
 574	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
 575	cmdsp->data = resp_dma;
 576	cmdsp->data_len = req->sba->hw_resp_size;
 577	cmdsp++;
 578
 579	/* Type-A command to write buf0 to dummy location */
 580	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
 581			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 582	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
 583			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 584	cmd = sba_cmd_enc(cmd, 0x1,
 585			  SBA_RESP_SHIFT, SBA_RESP_MASK);
 586	c_mdata = sba_cmd_write_c_mdata(0);
 587	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 588			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 589	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
 590			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 591	cmdsp->cmd = cmd;
 592	*cmdsp->cmd_dma = cpu_to_le64(cmd);
 593	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 594	if (req->sba->hw_resp_size) {
 595		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
 596		cmdsp->resp = resp_dma;
 597		cmdsp->resp_len = req->sba->hw_resp_size;
 598	}
 599	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
 600	cmdsp->data = resp_dma;
 601	cmdsp->data_len = req->sba->hw_resp_size;
 602	cmdsp++;
 603
 604	/* Fillup brcm_message */
 605	msg->type = BRCM_MESSAGE_SBA;
 606	msg->sba.cmds = cmds;
 607	msg->sba.cmds_count = cmdsp - cmds;
 608	msg->ctx = req;
 609	msg->error = 0;
 610}
 611
 612static struct dma_async_tx_descriptor *
 613sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
 614{
 615	struct sba_request *req = NULL;
 616	struct sba_device *sba = to_sba_device(dchan);
 617
 618	/* Alloc new request */
 619	req = sba_alloc_request(sba);
 620	if (!req)
 621		return NULL;
 622
 623	/*
 624	 * Force fence so that no requests are submitted
 625	 * until DMA callback for this request is invoked.
 626	 */
 627	req->flags |= SBA_REQUEST_FENCE;
 628
 629	/* Fillup request message */
 630	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
 631
 632	/* Init async_tx descriptor */
 633	req->tx.flags = flags;
 634	req->tx.cookie = -EBUSY;
 635
 636	return &req->tx;
 637}
 638
 639static void sba_fillup_memcpy_msg(struct sba_request *req,
 640				  struct brcm_sba_command *cmds,
 641				  struct brcm_message *msg,
 642				  dma_addr_t msg_offset, size_t msg_len,
 643				  dma_addr_t dst, dma_addr_t src)
 644{
 645	u64 cmd;
 646	u32 c_mdata;
 647	dma_addr_t resp_dma = req->tx.phys;
 648	struct brcm_sba_command *cmdsp = cmds;
 649
 650	/* Type-B command to load data into buf0 */
 651	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
 652			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 653	cmd = sba_cmd_enc(cmd, msg_len,
 654			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 655	c_mdata = sba_cmd_load_c_mdata(0);
 656	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 657			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 658	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
 659			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 660	cmdsp->cmd = cmd;
 661	*cmdsp->cmd_dma = cpu_to_le64(cmd);
 662	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
 663	cmdsp->data = src + msg_offset;
 664	cmdsp->data_len = msg_len;
 665	cmdsp++;
 666
 667	/* Type-A command to write buf0 */
 668	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
 669			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 670	cmd = sba_cmd_enc(cmd, msg_len,
 671			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 672	cmd = sba_cmd_enc(cmd, 0x1,
 673			  SBA_RESP_SHIFT, SBA_RESP_MASK);
 674	c_mdata = sba_cmd_write_c_mdata(0);
 675	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 676			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 677	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
 678			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 679	cmdsp->cmd = cmd;
 680	*cmdsp->cmd_dma = cpu_to_le64(cmd);
 681	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 682	if (req->sba->hw_resp_size) {
 683		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
 684		cmdsp->resp = resp_dma;
 685		cmdsp->resp_len = req->sba->hw_resp_size;
 686	}
 687	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
 688	cmdsp->data = dst + msg_offset;
 689	cmdsp->data_len = msg_len;
 690	cmdsp++;
 691
 692	/* Fillup brcm_message */
 693	msg->type = BRCM_MESSAGE_SBA;
 694	msg->sba.cmds = cmds;
 695	msg->sba.cmds_count = cmdsp - cmds;
 696	msg->ctx = req;
 697	msg->error = 0;
 698}
 699
 700static struct sba_request *
 701sba_prep_dma_memcpy_req(struct sba_device *sba,
 702			dma_addr_t off, dma_addr_t dst, dma_addr_t src,
 703			size_t len, unsigned long flags)
 704{
 705	struct sba_request *req = NULL;
 706
 707	/* Alloc new request */
 708	req = sba_alloc_request(sba);
 709	if (!req)
 710		return NULL;
 711	if (flags & DMA_PREP_FENCE)
 712		req->flags |= SBA_REQUEST_FENCE;
 713
 714	/* Fillup request message */
 715	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
 716			      off, len, dst, src);
 717
 718	/* Init async_tx descriptor */
 719	req->tx.flags = flags;
 720	req->tx.cookie = -EBUSY;
 721
 722	return req;
 723}
 724
 725static struct dma_async_tx_descriptor *
 726sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
 727		    size_t len, unsigned long flags)
 728{
 729	size_t req_len;
 730	dma_addr_t off = 0;
 731	struct sba_device *sba = to_sba_device(dchan);
 732	struct sba_request *first = NULL, *req;
 733
 734	/* Create chained requests where each request is up to hw_buf_size */
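    	/*
    	 * For example, with the 4096 byte hw_buf_size configured in
    	 * sba_probe(), a 10000 byte copy becomes three chained requests
    	 * of 4096, 4096 and 1808 bytes.
    	 */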
 735	while (len) {
 736		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
 737
 738		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
 739					      req_len, flags);
 740		if (!req) {
 741			if (first)
 742				sba_free_chained_requests(first);
 743			return NULL;
 744		}
 745
 746		if (first)
 747			sba_chain_request(first, req);
 748		else
 749			first = req;
 750
 751		off += req_len;
 752		len -= req_len;
 753	}
 754
 755	return (first) ? &first->tx : NULL;
 756}
 757
 758static void sba_fillup_xor_msg(struct sba_request *req,
 759				struct brcm_sba_command *cmds,
 760				struct brcm_message *msg,
 761				dma_addr_t msg_offset, size_t msg_len,
 762				dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
 763{
 764	u64 cmd;
 765	u32 c_mdata;
 766	unsigned int i;
 767	dma_addr_t resp_dma = req->tx.phys;
 768	struct brcm_sba_command *cmdsp = cmds;
 769
 770	/* Type-B command to load data into buf0 */
 771	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
 772			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 773	cmd = sba_cmd_enc(cmd, msg_len,
 774			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 775	c_mdata = sba_cmd_load_c_mdata(0);
 776	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 777			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 778	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
 779			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 780	cmdsp->cmd = cmd;
 781	*cmdsp->cmd_dma = cpu_to_le64(cmd);
 782	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
 783	cmdsp->data = src[0] + msg_offset;
 784	cmdsp->data_len = msg_len;
 785	cmdsp++;
 786
 787	/* Type-B commands to xor data with buf0 and put it back in buf0 */
 788	for (i = 1; i < src_cnt; i++) {
 789		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
 790				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 791		cmd = sba_cmd_enc(cmd, msg_len,
 792				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 793		c_mdata = sba_cmd_xor_c_mdata(0, 0);
 794		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 795				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 796		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
 797				  SBA_CMD_SHIFT, SBA_CMD_MASK);
 798		cmdsp->cmd = cmd;
 799		*cmdsp->cmd_dma = cpu_to_le64(cmd);
 800		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
 801		cmdsp->data = src[i] + msg_offset;
 802		cmdsp->data_len = msg_len;
 803		cmdsp++;
 804	}
 805
 806	/* Type-A command to write buf0 */
 807	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
 808			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 809	cmd = sba_cmd_enc(cmd, msg_len,
 810			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 811	cmd = sba_cmd_enc(cmd, 0x1,
 812			  SBA_RESP_SHIFT, SBA_RESP_MASK);
 813	c_mdata = sba_cmd_write_c_mdata(0);
 814	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 815			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 816	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
 817			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 818	cmdsp->cmd = cmd;
 819	*cmdsp->cmd_dma = cpu_to_le64(cmd);
 820	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 821	if (req->sba->hw_resp_size) {
 822		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
 823		cmdsp->resp = resp_dma;
 824		cmdsp->resp_len = req->sba->hw_resp_size;
 825	}
 826	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
 827	cmdsp->data = dst + msg_offset;
 828	cmdsp->data_len = msg_len;
 829	cmdsp++;
 830
 831	/* Fillup brcm_message */
 832	msg->type = BRCM_MESSAGE_SBA;
 833	msg->sba.cmds = cmds;
 834	msg->sba.cmds_count = cmdsp - cmds;
 835	msg->ctx = req;
 836	msg->error = 0;
 837}
 838
 839static struct sba_request *
 840sba_prep_dma_xor_req(struct sba_device *sba,
 841		     dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
 842		     u32 src_cnt, size_t len, unsigned long flags)
 843{
 844	struct sba_request *req = NULL;
 845
 846	/* Alloc new request */
 847	req = sba_alloc_request(sba);
 848	if (!req)
 849		return NULL;
 850	if (flags & DMA_PREP_FENCE)
 851		req->flags |= SBA_REQUEST_FENCE;
 852
 853	/* Fillup request message */
 854	sba_fillup_xor_msg(req, req->cmds, &req->msg,
 855			   off, len, dst, src, src_cnt);
 856
 857	/* Init async_tx descriptor */
 858	req->tx.flags = flags;
 859	req->tx.cookie = -EBUSY;
 860
 861	return req;
 862}
 863
 864static struct dma_async_tx_descriptor *
 865sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
 866		 u32 src_cnt, size_t len, unsigned long flags)
 867{
 868	size_t req_len;
 869	dma_addr_t off = 0;
 870	struct sba_device *sba = to_sba_device(dchan);
 871	struct sba_request *first = NULL, *req;
 872
 873	/* Sanity checks */
 874	if (unlikely(src_cnt > sba->max_xor_srcs))
 875		return NULL;
 876
 877	/* Create chained requests where each request is up to hw_buf_size */
 878	while (len) {
 879		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
 880
 881		req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
 882					   req_len, flags);
 883		if (!req) {
 884			if (first)
 885				sba_free_chained_requests(first);
 886			return NULL;
 887		}
 888
 889		if (first)
 890			sba_chain_request(first, req);
 891		else
 892			first = req;
 893
 894		off += req_len;
 895		len -= req_len;
 896	}
 897
 898	return (first) ? &first->tx : NULL;
 899}
 900
 901static void sba_fillup_pq_msg(struct sba_request *req,
 902				bool pq_continue,
 903				struct brcm_sba_command *cmds,
 904				struct brcm_message *msg,
 905				dma_addr_t msg_offset, size_t msg_len,
 906				dma_addr_t *dst_p, dma_addr_t *dst_q,
 907				const u8 *scf, dma_addr_t *src, u32 src_cnt)
 908{
 909	u64 cmd;
 910	u32 c_mdata;
 911	unsigned int i;
 912	dma_addr_t resp_dma = req->tx.phys;
 913	struct brcm_sba_command *cmdsp = cmds;
 914
 915	if (pq_continue) {
 916		/* Type-B command to load old P into buf0 */
 917		if (dst_p) {
 918			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
 919				SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 920			cmd = sba_cmd_enc(cmd, msg_len,
 921				SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 922			c_mdata = sba_cmd_load_c_mdata(0);
 923			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 924				SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 925			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
 926				SBA_CMD_SHIFT, SBA_CMD_MASK);
 927			cmdsp->cmd = cmd;
 928			*cmdsp->cmd_dma = cpu_to_le64(cmd);
 929			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
 930			cmdsp->data = *dst_p + msg_offset;
 931			cmdsp->data_len = msg_len;
 932			cmdsp++;
 933		}
 934
 935		/* Type-B command to load old Q into buf1 */
 936		if (dst_q) {
 937			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
 938				SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 939			cmd = sba_cmd_enc(cmd, msg_len,
 940				SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 941			c_mdata = sba_cmd_load_c_mdata(1);
 942			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 943				SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 944			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
 945				SBA_CMD_SHIFT, SBA_CMD_MASK);
 946			cmdsp->cmd = cmd;
 947			*cmdsp->cmd_dma = cpu_to_le64(cmd);
 948			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
 949			cmdsp->data = *dst_q + msg_offset;
 950			cmdsp->data_len = msg_len;
 951			cmdsp++;
 952		}
 953	} else {
 954		/* Type-A command to zero all buffers */
 955		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
 956				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 957		cmd = sba_cmd_enc(cmd, msg_len,
 958				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 959		cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
 960				  SBA_CMD_SHIFT, SBA_CMD_MASK);
 961		cmdsp->cmd = cmd;
 962		*cmdsp->cmd_dma = cpu_to_le64(cmd);
 963		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
 964		cmdsp++;
 965	}
 966
 967	/* Type-B commands to generate P into buf0 and Q into buf1 */
 968	for (i = 0; i < src_cnt; i++) {
 969		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
 970				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 971		cmd = sba_cmd_enc(cmd, msg_len,
 972				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 973		c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
 974		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 975				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 976		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
 977				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
 978		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
 979				  SBA_CMD_SHIFT, SBA_CMD_MASK);
 980		cmdsp->cmd = cmd;
 981		*cmdsp->cmd_dma = cpu_to_le64(cmd);
 982		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
 983		cmdsp->data = src[i] + msg_offset;
 984		cmdsp->data_len = msg_len;
 985		cmdsp++;
 986	}
 987
 988	/* Type-A command to write buf0 */
 989	if (dst_p) {
 990		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
 991				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 992		cmd = sba_cmd_enc(cmd, msg_len,
 993				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 994		cmd = sba_cmd_enc(cmd, 0x1,
 995				  SBA_RESP_SHIFT, SBA_RESP_MASK);
 996		c_mdata = sba_cmd_write_c_mdata(0);
 997		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
 998				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 999		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1000				  SBA_CMD_SHIFT, SBA_CMD_MASK);
1001		cmdsp->cmd = cmd;
1002		*cmdsp->cmd_dma = cpu_to_le64(cmd);
1003		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1004		if (req->sba->hw_resp_size) {
1005			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
1006			cmdsp->resp = resp_dma;
1007			cmdsp->resp_len = req->sba->hw_resp_size;
1008		}
1009		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1010		cmdsp->data = *dst_p + msg_offset;
1011		cmdsp->data_len = msg_len;
1012		cmdsp++;
1013	}
1014
1015	/* Type-A command to write buf1 */
1016	if (dst_q) {
1017		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1018				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1019		cmd = sba_cmd_enc(cmd, msg_len,
1020				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1021		cmd = sba_cmd_enc(cmd, 0x1,
1022				  SBA_RESP_SHIFT, SBA_RESP_MASK);
1023		c_mdata = sba_cmd_write_c_mdata(1);
1024		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1025				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1026		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1027				  SBA_CMD_SHIFT, SBA_CMD_MASK);
1028		cmdsp->cmd = cmd;
1029		*cmdsp->cmd_dma = cpu_to_le64(cmd);
1030		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1031		if (req->sba->hw_resp_size) {
1032			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
1033			cmdsp->resp = resp_dma;
1034			cmdsp->resp_len = req->sba->hw_resp_size;
1035		}
1036		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1037		cmdsp->data = *dst_q + msg_offset;
1038		cmdsp->data_len = msg_len;
1039		cmdsp++;
1040	}
1041
1042	/* Fillup brcm_message */
1043	msg->type = BRCM_MESSAGE_SBA;
1044	msg->sba.cmds = cmds;
1045	msg->sba.cmds_count = cmdsp - cmds;
1046	msg->ctx = req;
1047	msg->error = 0;
1048}
1049
1050static struct sba_request *
1051sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
1052		    dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
1053		    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1054{
1055	struct sba_request *req = NULL;
1056
1057	/* Alloc new request */
1058	req = sba_alloc_request(sba);
1059	if (!req)
1060		return NULL;
1061	if (flags & DMA_PREP_FENCE)
1062		req->flags |= SBA_REQUEST_FENCE;
1063
1064	/* Fillup request messages */
1065	sba_fillup_pq_msg(req, dmaf_continue(flags),
1066			  req->cmds, &req->msg,
1067			  off, len, dst_p, dst_q, scf, src, src_cnt);
1068
1069	/* Init async_tx descriptor */
1070	req->tx.flags = flags;
1071	req->tx.cookie = -EBUSY;
1072
1073	return req;
1074}
1075
1076static void sba_fillup_pq_single_msg(struct sba_request *req,
1077				bool pq_continue,
1078				struct brcm_sba_command *cmds,
1079				struct brcm_message *msg,
1080				dma_addr_t msg_offset, size_t msg_len,
1081				dma_addr_t *dst_p, dma_addr_t *dst_q,
1082				dma_addr_t src, u8 scf)
1083{
1084	u64 cmd;
1085	u32 c_mdata;
1086	u8 pos, dpos = raid6_gflog[scf];
1087	dma_addr_t resp_dma = req->tx.phys;
1088	struct brcm_sba_command *cmdsp = cmds;
1089
1090	if (!dst_p)
1091		goto skip_p;
1092
1093	if (pq_continue) {
1094		/* Type-B command to load old P into buf0 */
1095		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1096				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1097		cmd = sba_cmd_enc(cmd, msg_len,
1098				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1099		c_mdata = sba_cmd_load_c_mdata(0);
1100		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1101				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1102		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
1103				  SBA_CMD_SHIFT, SBA_CMD_MASK);
1104		cmdsp->cmd = cmd;
1105		*cmdsp->cmd_dma = cpu_to_le64(cmd);
1106		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1107		cmdsp->data = *dst_p + msg_offset;
1108		cmdsp->data_len = msg_len;
1109		cmdsp++;
1110
1111		/*
1112		 * Type-B command to xor data with buf0 and put it
1113		 * back in buf0
1114		 */
1115		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1116				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1117		cmd = sba_cmd_enc(cmd, msg_len,
1118				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1119		c_mdata = sba_cmd_xor_c_mdata(0, 0);
1120		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1121				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1122		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
1123				  SBA_CMD_SHIFT, SBA_CMD_MASK);
1124		cmdsp->cmd = cmd;
1125		*cmdsp->cmd_dma = cpu_to_le64(cmd);
1126		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1127		cmdsp->data = src + msg_offset;
1128		cmdsp->data_len = msg_len;
1129		cmdsp++;
1130	} else {
1131		/* Type-B command to load old P into buf0 */
1132		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1133				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1134		cmd = sba_cmd_enc(cmd, msg_len,
1135				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1136		c_mdata = sba_cmd_load_c_mdata(0);
1137		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1138				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1139		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
1140				  SBA_CMD_SHIFT, SBA_CMD_MASK);
1141		cmdsp->cmd = cmd;
1142		*cmdsp->cmd_dma = cpu_to_le64(cmd);
1143		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1144		cmdsp->data = src + msg_offset;
1145		cmdsp->data_len = msg_len;
1146		cmdsp++;
1147	}
1148
1149	/* Type-A command to write buf0 */
1150	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1151			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1152	cmd = sba_cmd_enc(cmd, msg_len,
1153			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1154	cmd = sba_cmd_enc(cmd, 0x1,
1155			  SBA_RESP_SHIFT, SBA_RESP_MASK);
1156	c_mdata = sba_cmd_write_c_mdata(0);
1157	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1158			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1159	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1160			  SBA_CMD_SHIFT, SBA_CMD_MASK);
1161	cmdsp->cmd = cmd;
1162	*cmdsp->cmd_dma = cpu_to_le64(cmd);
1163	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1164	if (req->sba->hw_resp_size) {
1165		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
1166		cmdsp->resp = resp_dma;
1167		cmdsp->resp_len = req->sba->hw_resp_size;
1168	}
1169	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1170	cmdsp->data = *dst_p + msg_offset;
1171	cmdsp->data_len = msg_len;
1172	cmdsp++;
1173
1174skip_p:
1175	if (!dst_q)
1176		goto skip_q;
1177
1178	/* Type-A command to zero all buffers */
1179	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1180			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1181	cmd = sba_cmd_enc(cmd, msg_len,
1182			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1183	cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
1184			  SBA_CMD_SHIFT, SBA_CMD_MASK);
1185	cmdsp->cmd = cmd;
1186	*cmdsp->cmd_dma = cpu_to_le64(cmd);
1187	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1188	cmdsp++;
1189
1190	if (dpos == 255)
1191		goto skip_q_computation;
1192	pos = (dpos < req->sba->max_pq_coefs) ?
1193		dpos : (req->sba->max_pq_coefs - 1);
1194
1195	/*
1196	 * Type-B command to generate initial Q from data
1197	 * and store output into buf0
1198	 */
1199	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1200			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1201	cmd = sba_cmd_enc(cmd, msg_len,
1202			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1203	c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
1204	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1205			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1206	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
1207			  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
1208	cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
1209			  SBA_CMD_SHIFT, SBA_CMD_MASK);
1210	cmdsp->cmd = cmd;
1211	*cmdsp->cmd_dma = cpu_to_le64(cmd);
1212	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1213	cmdsp->data = src + msg_offset;
1214	cmdsp->data_len = msg_len;
1215	cmdsp++;
1216
1217	dpos -= pos;
1218
1219	/* Multiple Type-A commands to generate final Q */
1220	while (dpos) {
1221		pos = (dpos < req->sba->max_pq_coefs) ?
1222			dpos : (req->sba->max_pq_coefs - 1);
1223
1224		/*
1225		 * Type-A command to generate Q with buf0 and
1226		 * buf1, and store the result in buf0
1227		 */
1228		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1229				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1230		cmd = sba_cmd_enc(cmd, msg_len,
1231				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1232		c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
1233		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1234				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1235		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
1236				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
1237		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
1238				  SBA_CMD_SHIFT, SBA_CMD_MASK);
1239		cmdsp->cmd = cmd;
1240		*cmdsp->cmd_dma = cpu_to_le64(cmd);
1241		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1242		cmdsp++;
1243
1244		dpos -= pos;
1245	}
1246
1247skip_q_computation:
1248	if (pq_continue) {
1249		/*
1250		 * Type-B command to XOR previous output with
1251		 * buf0 and write it into buf0
1252		 */
1253		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1254				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1255		cmd = sba_cmd_enc(cmd, msg_len,
1256				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1257		c_mdata = sba_cmd_xor_c_mdata(0, 0);
1258		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1259				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1260		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
1261				  SBA_CMD_SHIFT, SBA_CMD_MASK);
1262		cmdsp->cmd = cmd;
1263		*cmdsp->cmd_dma = cpu_to_le64(cmd);
1264		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1265		cmdsp->data = *dst_q + msg_offset;
1266		cmdsp->data_len = msg_len;
1267		cmdsp++;
1268	}
1269
1270	/* Type-A command to write buf0 */
1271	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1272			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1273	cmd = sba_cmd_enc(cmd, msg_len,
1274			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1275	cmd = sba_cmd_enc(cmd, 0x1,
1276			  SBA_RESP_SHIFT, SBA_RESP_MASK);
1277	c_mdata = sba_cmd_write_c_mdata(0);
1278	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1279			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1280	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1281			  SBA_CMD_SHIFT, SBA_CMD_MASK);
1282	cmdsp->cmd = cmd;
1283	*cmdsp->cmd_dma = cpu_to_le64(cmd);
1284	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1285	if (req->sba->hw_resp_size) {
1286		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
1287		cmdsp->resp = resp_dma;
1288		cmdsp->resp_len = req->sba->hw_resp_size;
1289	}
1290	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1291	cmdsp->data = *dst_q + msg_offset;
1292	cmdsp->data_len = msg_len;
1293	cmdsp++;
1294
1295skip_q:
1296	/* Fillup brcm_message */
1297	msg->type = BRCM_MESSAGE_SBA;
1298	msg->sba.cmds = cmds;
1299	msg->sba.cmds_count = cmdsp - cmds;
1300	msg->ctx = req;
1301	msg->error = 0;
1302}
1303
1304static struct sba_request *
1305sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
1306			   dma_addr_t *dst_p, dma_addr_t *dst_q,
1307			   dma_addr_t src, u8 scf, size_t len,
1308			   unsigned long flags)
1309{
1310	struct sba_request *req = NULL;
1311
1312	/* Alloc new request */
1313	req = sba_alloc_request(sba);
1314	if (!req)
1315		return NULL;
1316	if (flags & DMA_PREP_FENCE)
1317		req->flags |= SBA_REQUEST_FENCE;
1318
1319	/* Fillup request messages */
1320	sba_fillup_pq_single_msg(req, dmaf_continue(flags),
1321				 req->cmds, &req->msg, off, len,
1322				 dst_p, dst_q, src, scf);
1323
1324	/* Init async_tx descriptor */
1325	req->tx.flags = flags;
1326	req->tx.cookie = -EBUSY;
1327
1328	return req;
1329}
1330
1331static struct dma_async_tx_descriptor *
1332sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
1333		u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1334{
1335	u32 i, dst_q_index;
1336	size_t req_len;
1337	bool slow = false;
1338	dma_addr_t off = 0;
1339	dma_addr_t *dst_p = NULL, *dst_q = NULL;
1340	struct sba_device *sba = to_sba_device(dchan);
1341	struct sba_request *first = NULL, *req;
1342
1343	/* Sanity checks */
1344	if (unlikely(src_cnt > sba->max_pq_srcs))
1345		return NULL;
1346	for (i = 0; i < src_cnt; i++)
1347		if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
1348			slow = true;
1349
1350	/* Figure out P and Q destination addresses */
1351	if (!(flags & DMA_PREP_PQ_DISABLE_P))
1352		dst_p = &dst[0];
1353	if (!(flags & DMA_PREP_PQ_DISABLE_Q))
1354		dst_q = &dst[1];
1355
1356	/* Create chained requests where each request is up to hw_buf_size */
1357	while (len) {
1358		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
1359
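    		/*
    		 * A coefficient whose GF log is >= max_pq_coefs cannot be
    		 * applied by a single GALOIS command, so the slow path breaks
    		 * the PQ operation into per-source single-src requests
    		 * (sba_prep_dma_pq_single_req) which apply the coefficient
    		 * in multiple GALOIS steps.
    		 */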
1360		if (slow) {
1361			dst_q_index = src_cnt;
1362
1363			if (dst_q) {
1364				for (i = 0; i < src_cnt; i++) {
1365					if (*dst_q == src[i]) {
1366						dst_q_index = i;
1367						break;
1368					}
1369				}
1370			}
1371
1372			if (dst_q_index < src_cnt) {
1373				i = dst_q_index;
1374				req = sba_prep_dma_pq_single_req(sba,
1375					off, dst_p, dst_q, src[i], scf[i],
1376					req_len, flags | DMA_PREP_FENCE);
1377				if (!req)
1378					goto fail;
1379
1380				if (first)
1381					sba_chain_request(first, req);
1382				else
1383					first = req;
1384
1385				flags |= DMA_PREP_CONTINUE;
1386			}
1387
1388			for (i = 0; i < src_cnt; i++) {
1389				if (dst_q_index == i)
1390					continue;
1391
1392				req = sba_prep_dma_pq_single_req(sba,
1393					off, dst_p, dst_q, src[i], scf[i],
1394					req_len, flags | DMA_PREP_FENCE);
1395				if (!req)
1396					goto fail;
1397
1398				if (first)
1399					sba_chain_request(first, req);
1400				else
1401					first = req;
1402
1403				flags |= DMA_PREP_CONTINUE;
1404			}
1405		} else {
1406			req = sba_prep_dma_pq_req(sba, off,
1407						  dst_p, dst_q, src, src_cnt,
1408						  scf, req_len, flags);
1409			if (!req)
1410				goto fail;
1411
1412			if (first)
1413				sba_chain_request(first, req);
1414			else
1415				first = req;
1416		}
1417
1418		off += req_len;
1419		len -= req_len;
1420	}
1421
1422	return (first) ? &first->tx : NULL;
1423
1424fail:
1425	if (first)
1426		sba_free_chained_requests(first);
1427	return NULL;
1428}
1429
1430/* ====== Mailbox callbacks ===== */
1431
1432static void sba_receive_message(struct mbox_client *cl, void *msg)
1433{
1434	struct brcm_message *m = msg;
1435	struct sba_request *req = m->ctx;
1436	struct sba_device *sba = req->sba;
1437
1438	/* Log an error if the message has an error */
1439	if (m->error < 0)
1440		dev_err(sba->dev, "%s got message with error %d",
1441			dma_chan_name(&sba->dma_chan), m->error);
1442
1443	/* Process received request */
1444	sba_process_received_request(sba, req);
1445}
1446
1447/* ====== Debugfs callbacks ====== */
1448
1449static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
1450{
1451	struct sba_device *sba = dev_get_drvdata(file->private);
1452
1453	/* Write stats in file */
1454	sba_write_stats_in_seqfile(sba, file);
1455
1456	return 0;
1457}
1458
1459/* ====== Platform driver routines ===== */
1460
1461static int sba_prealloc_channel_resources(struct sba_device *sba)
1462{
1463	int i, j, ret = 0;
1464	struct sba_request *req = NULL;
1465
1466	sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
1467					    sba->max_resp_pool_size,
1468					    &sba->resp_dma_base, GFP_KERNEL);
1469	if (!sba->resp_base)
1470		return -ENOMEM;
1471
1472	sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
1473					    sba->max_cmds_pool_size,
1474					    &sba->cmds_dma_base, GFP_KERNEL);
1475	if (!sba->cmds_base) {
1476		ret = -ENOMEM;
1477		goto fail_free_resp_pool;
1478	}
1479
1480	spin_lock_init(&sba->reqs_lock);
1481	sba->reqs_fence = false;
1482	INIT_LIST_HEAD(&sba->reqs_alloc_list);
1483	INIT_LIST_HEAD(&sba->reqs_pending_list);
1484	INIT_LIST_HEAD(&sba->reqs_active_list);
1485	INIT_LIST_HEAD(&sba->reqs_aborted_list);
1486	INIT_LIST_HEAD(&sba->reqs_free_list);
1487
1488	for (i = 0; i < sba->max_req; i++) {
1489		req = devm_kzalloc(sba->dev,
1490				   struct_size(req, cmds, sba->max_cmd_per_req),
1491				   GFP_KERNEL);
1492		if (!req) {
1493			ret = -ENOMEM;
1494			goto fail_free_cmds_pool;
1495		}
1496		INIT_LIST_HEAD(&req->node);
1497		req->sba = sba;
1498		req->flags = SBA_REQUEST_STATE_FREE;
1499		INIT_LIST_HEAD(&req->next);
1500		atomic_set(&req->next_pending_count, 0);
1501		for (j = 0; j < sba->max_cmd_per_req; j++) {
1502			req->cmds[j].cmd = 0;
1503			req->cmds[j].cmd_dma = sba->cmds_base +
1504				(i * sba->max_cmd_per_req + j) * sizeof(u64);
1505			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
1506				(i * sba->max_cmd_per_req + j) * sizeof(u64);
1507			req->cmds[j].flags = 0;
1508		}
1509		memset(&req->msg, 0, sizeof(req->msg));
1510		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
1511		async_tx_ack(&req->tx);
1512		req->tx.tx_submit = sba_tx_submit;
1513		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
1514		list_add_tail(&req->node, &sba->reqs_free_list);
1515	}
1516
1517	return 0;
1518
1519fail_free_cmds_pool:
1520	dma_free_coherent(sba->mbox_dev,
1521			  sba->max_cmds_pool_size,
1522			  sba->cmds_base, sba->cmds_dma_base);
1523fail_free_resp_pool:
1524	dma_free_coherent(sba->mbox_dev,
1525			  sba->max_resp_pool_size,
1526			  sba->resp_base, sba->resp_dma_base);
1527	return ret;
1528}
1529
1530static void sba_freeup_channel_resources(struct sba_device *sba)
1531{
1532	dmaengine_terminate_all(&sba->dma_chan);
1533	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
1534			  sba->cmds_base, sba->cmds_dma_base);
1535	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
1536			  sba->resp_base, sba->resp_dma_base);
1537	sba->resp_base = NULL;
1538	sba->resp_dma_base = 0;
1539}
1540
1541static int sba_async_register(struct sba_device *sba)
1542{
1543	int ret;
1544	struct dma_device *dma_dev = &sba->dma_dev;
1545
1546	/* Initialize DMA channel cookie */
1547	sba->dma_chan.device = dma_dev;
1548	dma_cookie_init(&sba->dma_chan);
1549
1550	/* Initialize DMA device capability mask */
1551	dma_cap_zero(dma_dev->cap_mask);
1552	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
1553	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1554	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1555	dma_cap_set(DMA_PQ, dma_dev->cap_mask);
1556
1557	/*
1558	 * Set mailbox channel device as the base device of
1559	 * our dma_device because the actual memory accesses
1560	 * will be done by the mailbox controller
1561	 */
1562	dma_dev->dev = sba->mbox_dev;
1563
1564	/* Set base prep routines */
1565	dma_dev->device_free_chan_resources = sba_free_chan_resources;
1566	dma_dev->device_terminate_all = sba_device_terminate_all;
1567	dma_dev->device_issue_pending = sba_issue_pending;
1568	dma_dev->device_tx_status = sba_tx_status;
1569
1570	/* Set interrupt routine */
1571	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1572		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
1573
1574	/* Set memcpy routine */
1575	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1576		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
1577
1578	/* Set xor routine and capability */
1579	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1580		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
1581		dma_dev->max_xor = sba->max_xor_srcs;
1582	}
1583
1584	/* Set pq routine and capability */
1585	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1586		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
1587		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
1588	}
1589
1590	/* Initialize DMA device channel list */
1591	INIT_LIST_HEAD(&dma_dev->channels);
1592	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
1593
1594	/* Register with Linux async DMA framework */
1595	ret = dma_async_device_register(dma_dev);
1596	if (ret) {
1597		dev_err(sba->dev, "async device register error %d", ret);
1598		return ret;
1599	}
1600
1601	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
1602		 dma_chan_name(&sba->dma_chan),
1603		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
1604		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
1605		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1606		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");
1607
1608	return 0;
1609}
1610
1611static int sba_probe(struct platform_device *pdev)
1612{
1613	int ret = 0;
1614	struct sba_device *sba;
1615	struct platform_device *mbox_pdev;
1616	struct of_phandle_args args;
1617
1618	/* Allocate main SBA struct */
1619	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
1620	if (!sba)
1621		return -ENOMEM;
1622
1623	sba->dev = &pdev->dev;
1624	platform_set_drvdata(pdev, sba);
1625
1626	/* Number of mailbox channels should be at least 1 */
1627	ret = of_count_phandle_with_args(pdev->dev.of_node,
1628					 "mboxes", "#mbox-cells");
1629	if (ret <= 0)
1630		return -ENODEV;
1631
1632	/* Determine SBA version from DT compatible string */
1633	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
1634		sba->ver = SBA_VER_1;
1635	else if (of_device_is_compatible(sba->dev->of_node,
1636					 "brcm,iproc-sba-v2"))
1637		sba->ver = SBA_VER_2;
1638	else
1639		return -ENODEV;
1640
1641	/* Derived configuration parameters */
1642	switch (sba->ver) {
1643	case SBA_VER_1:
1644		sba->hw_buf_size = 4096;
1645		sba->hw_resp_size = 8;
1646		sba->max_pq_coefs = 6;
1647		sba->max_pq_srcs = 6;
1648		break;
1649	case SBA_VER_2:
1650		sba->hw_buf_size = 4096;
1651		sba->hw_resp_size = 8;
1652		sba->max_pq_coefs = 30;
1653		/*
1654		 * We could support max_pq_srcs == max_pq_coefs, but we
1655		 * are limited by the number of SBA commands that we can
1656		 * fit in one message for the underlying ring manager HW.
1657		 */
1658		sba->max_pq_srcs = 12;
1659		break;
1660	default:
1661		return -EINVAL;
1662	}
1663	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
1664	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
1665	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
1666	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
1667	sba->max_cmds_pool_size = sba->max_req *
1668				  sba->max_cmd_per_req * sizeof(u64);
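    	/*
    	 * Worked example for SBA_VER_2: max_cmd_per_req = 12 + 3 = 15,
    	 * max_xor_srcs = 14, resp pool = 8192 * 8 bytes = 64 KB and
    	 * cmds pool = 8192 * 15 * 8 bytes = 960 KB.
    	 */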
1669
1670	/* Setup mailbox client */
1671	sba->client.dev			= &pdev->dev;
1672	sba->client.rx_callback		= sba_receive_message;
1673	sba->client.tx_block		= false;
1674	sba->client.knows_txdone	= true;
1675	sba->client.tx_tout		= 0;
1676
1677	/* Request mailbox channel */
1678	sba->mchan = mbox_request_channel(&sba->client, 0);
1679	if (IS_ERR(sba->mchan)) {
1680		ret = PTR_ERR(sba->mchan);
1681		return ret;
1682	}
1683
1684	/* Find out the underlying mailbox device */
1685	ret = of_parse_phandle_with_args(pdev->dev.of_node,
1686					 "mboxes", "#mbox-cells", 0, &args);
1687	if (ret)
1688		goto fail_free_mchan;
1689	mbox_pdev = of_find_device_by_node(args.np);
1690	of_node_put(args.np);
1691	if (!mbox_pdev) {
1692		ret = -ENODEV;
1693		goto fail_free_mchan;
1694	}
1695	sba->mbox_dev = &mbox_pdev->dev;
1696
1697	/* Prealloc channel resources */
1698	ret = sba_prealloc_channel_resources(sba);
1699	if (ret)
1700		goto fail_free_mchan;
1701
1702	/* Check availability of debugfs */
1703	if (!debugfs_initialized())
1704		goto skip_debugfs;
1705
1706	/* Create debugfs root entry */
1707	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
1708
1709	/* Create debugfs stats entry */
1710	debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
1711				    sba_debugfs_stats_show);
1712
1713skip_debugfs:
1714
1715	/* Register DMA device with Linux async framework */
1716	ret = sba_async_register(sba);
1717	if (ret)
1718		goto fail_free_resources;
1719
1720	/* Print device info */
1721	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
1722		 dma_chan_name(&sba->dma_chan), sba->ver+1,
1723		 dev_name(sba->mbox_dev));
1724
1725	return 0;
1726
1727fail_free_resources:
1728	debugfs_remove_recursive(sba->root);
1729	sba_freeup_channel_resources(sba);
1730fail_free_mchan:
1731	mbox_free_channel(sba->mchan);
1732	return ret;
1733}
1734
1735static int sba_remove(struct platform_device *pdev)
1736{
1737	struct sba_device *sba = platform_get_drvdata(pdev);
1738
1739	dma_async_device_unregister(&sba->dma_dev);
1740
1741	debugfs_remove_recursive(sba->root);
1742
1743	sba_freeup_channel_resources(sba);
1744
1745	mbox_free_channel(sba->mchan);
1746
1747	return 0;
1748}
1749
1750static const struct of_device_id sba_of_match[] = {
1751	{ .compatible = "brcm,iproc-sba", },
1752	{ .compatible = "brcm,iproc-sba-v2", },
1753	{},
1754};
1755MODULE_DEVICE_TABLE(of, sba_of_match);
1756
1757static struct platform_driver sba_driver = {
1758	.probe = sba_probe,
1759	.remove = sba_remove,
1760	.driver = {
1761		.name = "bcm-sba-raid",
1762		.of_match_table = sba_of_match,
1763	},
1764};
1765module_platform_driver(sba_driver);
1766
1767MODULE_DESCRIPTION("Broadcom SBA RAID driver");
1768MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
1769MODULE_LICENSE("GPL v2");