// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2017 Broadcom

/*
 * Broadcom SBA RAID Driver
 *
 * The Broadcom stream buffer accelerator (SBA) provides offloading
 * capabilities for RAID operations. The SBA offload engine is accessible
 * via the Broadcom SoC specific ring manager. Two or more offload engines
 * can share the same Broadcom SoC specific ring manager; because of this,
 * the ring manager driver is implemented as a mailbox controller driver
 * and the offload engine drivers are implemented as mailbox clients.
 *
 * Typically, a Broadcom SoC specific ring manager implements a larger
 * number of hardware rings over one or more SBA hardware devices. By
 * design, the internal buffer size of an SBA hardware device is limited,
 * but all offload operations supported by SBA can be broken down into
 * multiple small-size requests and executed in parallel on multiple SBA
 * hardware devices to achieve high throughput.
 *
 * The Broadcom SBA RAID driver does not require any register programming
 * apart from submitting requests to the SBA hardware device via mailbox
 * channels. This driver implements a DMA device with one DMA channel
 * using a single mailbox channel provided by the Broadcom SoC specific
 * ring manager driver. To have more SBA DMA channels, we can create more
 * SBA device nodes in the Broadcom SoC specific DTS based on the number
 * of hardware rings supported by the Broadcom SoC ring manager.
 */
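
/*
 * An illustrative device tree node for one SBA DMA channel; the
 * compatible strings match sba_of_match below, while the ring manager
 * phandle, unit address, and mbox specifier are made-up placeholders
 * that depend on the actual SoC DTS:
 *
 *	raid_dma: sba@0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&ringmgr 0>;
 *	};
 *
 * Instantiating more such nodes, each with its own mailbox channel,
 * yields more SBA DMA channels.
 */
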
29
30#include <linux/bitops.h>
31#include <linux/debugfs.h>
32#include <linux/dma-mapping.h>
33#include <linux/dmaengine.h>
34#include <linux/list.h>
35#include <linux/mailbox_client.h>
36#include <linux/mailbox/brcm-message.h>
37#include <linux/module.h>
38#include <linux/of.h>
39#include <linux/of_platform.h>
40#include <linux/platform_device.h>
41#include <linux/slab.h>
42#include <linux/raid/pq.h>
43
44#include "dmaengine.h"
45
46/* ====== Driver macros and defines ===== */
47
48#define SBA_TYPE_SHIFT 48
49#define SBA_TYPE_MASK GENMASK(1, 0)
50#define SBA_TYPE_A 0x0
51#define SBA_TYPE_B 0x2
52#define SBA_TYPE_C 0x3
53#define SBA_USER_DEF_SHIFT 32
54#define SBA_USER_DEF_MASK GENMASK(15, 0)
55#define SBA_R_MDATA_SHIFT 24
56#define SBA_R_MDATA_MASK GENMASK(7, 0)
57#define SBA_C_MDATA_MS_SHIFT 18
58#define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
59#define SBA_INT_SHIFT 17
60#define SBA_INT_MASK BIT(0)
61#define SBA_RESP_SHIFT 16
62#define SBA_RESP_MASK BIT(0)
63#define SBA_C_MDATA_SHIFT 8
64#define SBA_C_MDATA_MASK GENMASK(7, 0)
65#define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
66#define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
67#define SBA_C_MDATA_DNUM_SHIFT 5
68#define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
69#define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
70#define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
71#define SBA_CMD_SHIFT 0
72#define SBA_CMD_MASK GENMASK(3, 0)
73#define SBA_CMD_ZERO_BUFFER 0x4
74#define SBA_CMD_ZERO_ALL_BUFFERS 0x8
75#define SBA_CMD_LOAD_BUFFER 0x9
76#define SBA_CMD_XOR 0xa
77#define SBA_CMD_GALOIS_XOR 0xb
78#define SBA_CMD_WRITE_BUFFER 0xc
79#define SBA_CMD_GALOIS 0xe
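
/*
 * Sketch of the 64-bit SBA command word implied by the shift/mask
 * defines above (field widths taken from the GENMASK()/BIT() values):
 *
 *	[49:48] TYPE       (SBA_TYPE_A/B/C)
 *	[47:32] USER_DEF   (carries the operation length in this driver)
 *	[31:24] R_MDATA
 *	[19:18] C_MDATA_MS (upper two bits of the 10-bit C_MDATA value)
 *	[17]    INT
 *	[16]    RESP
 *	[15:8]  C_MDATA    (lower eight bits)
 *	[3:0]   CMD        (SBA_CMD_*)
 *
 * C_MDATA itself packs buffer numbers (two bits per buffer) and, for
 * Galois operations, a five-bit coefficient via SBA_C_MDATA_DNUM_*.
 */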

#define SBA_MAX_REQ_PER_MBOX_CHANNEL		8192
#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL	8

/* Driver helper macros */
#define to_sba_request(tx)		\
	container_of(tx, struct sba_request, tx)
#define to_sba_device(dchan)		\
	container_of(dchan, struct sba_device, dma_chan)

/* ===== Driver data structures ===== */

enum sba_request_flags {
	SBA_REQUEST_STATE_FREE		= 0x001,
	SBA_REQUEST_STATE_ALLOCED	= 0x002,
	SBA_REQUEST_STATE_PENDING	= 0x004,
	SBA_REQUEST_STATE_ACTIVE	= 0x008,
	SBA_REQUEST_STATE_ABORTED	= 0x010,
	SBA_REQUEST_STATE_MASK		= 0x0ff,
	SBA_REQUEST_FENCE		= 0x100,
};
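
/*
 * Request lifecycle, as implemented by the helpers further below:
 * FREE -> ALLOCED (sba_alloc_request) -> PENDING (sba_tx_submit) ->
 * ACTIVE (_sba_process_pending_requests) -> FREE again once the
 * mailbox completion is processed, or -> ABORTED when the channel is
 * cleaned up while requests are still in flight.
 */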

struct sba_request {
	/* Global state */
	struct list_head node;
	struct sba_device *sba;
	u32 flags;
	/* Chained requests management */
	struct sba_request *first;
	struct list_head next;
	atomic_t next_pending_count;
	/* BRCM message data */
	struct brcm_message msg;
	struct dma_async_tx_descriptor tx;
	/* SBA commands */
	struct brcm_sba_command cmds[];
};

enum sba_version {
	SBA_VER_1 = 0,
	SBA_VER_2
};

struct sba_device {
	/* Underlying device */
	struct device *dev;
	/* DT configuration parameters */
	enum sba_version ver;
	/* Derived configuration parameters */
	u32 max_req;
	u32 hw_buf_size;
	u32 hw_resp_size;
	u32 max_pq_coefs;
	u32 max_pq_srcs;
	u32 max_cmd_per_req;
	u32 max_xor_srcs;
	u32 max_resp_pool_size;
	u32 max_cmds_pool_size;
	/* Mailbox client and mailbox channel */
	struct mbox_client client;
	struct mbox_chan *mchan;
	struct device *mbox_dev;
	/* DMA device and DMA channel */
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	/* DMA channel resources */
	void *resp_base;
	dma_addr_t resp_dma_base;
	void *cmds_base;
	dma_addr_t cmds_dma_base;
	spinlock_t reqs_lock;
	bool reqs_fence;
	struct list_head reqs_alloc_list;
	struct list_head reqs_pending_list;
	struct list_head reqs_active_list;
	struct list_head reqs_aborted_list;
	struct list_head reqs_free_list;
	/* DebugFS directory entries */
	struct dentry *root;
};

/* ====== Command helper routines ===== */

static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
	cmd &= ~((u64)mask << shift);
	cmd |= ((u64)(val & mask) << shift);
	return cmd;
}

static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
}

static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
	       ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}
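
/*
 * Worked example (values derived from the defines above): the Type-B
 * LOAD_BUFFER command built in sba_fillup_memcpy_msg() for a 64-byte
 * transfer into buf0 is
 *
 *	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 *	cmd = sba_cmd_enc(cmd, 64, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(sba_cmd_load_c_mdata(0)),
 *			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
 *			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 *
 * which evaluates to 0x0002004000000009: type 0x2 at bit 48, length 64
 * at bit 32, buffer number 0 in C_MDATA, and opcode 0x9 in CMD.
 */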

/* ====== General helper routines ===== */

static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
	bool found = false;
	unsigned long flags;
	struct sba_request *req = NULL;

	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(req, &sba->reqs_free_list, node) {
		if (async_tx_test_ack(&req->tx)) {
			list_move_tail(&req->node, &sba->reqs_alloc_list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	if (!found) {
		/*
		 * We have no more free requests, so we peek the
		 * mailbox channel hoping a few active requests
		 * have completed, which would create more room
		 * for new requests.
		 */
		mbox_client_peek_data(sba->mchan);
		return NULL;
	}

	req->flags = SBA_REQUEST_STATE_ALLOCED;
	req->first = req;
	INIT_LIST_HEAD(&req->next);
	atomic_set(&req->next_pending_count, 1);

	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	async_tx_ack(&req->tx);

	return req;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_pending_request(struct sba_device *sba,
				 struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_PENDING;
	list_move_tail(&req->node, &sba->reqs_pending_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static bool _sba_active_request(struct sba_device *sba,
				struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
	if (sba->reqs_fence)
		return false;
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ACTIVE;
	list_move_tail(&req->node, &sba->reqs_active_list);
	if (req->flags & SBA_REQUEST_FENCE)
		sba->reqs_fence = true;
	return true;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_abort_request(struct sba_device *sba,
			       struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ABORTED;
	list_move_tail(&req->node, &sba->reqs_aborted_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_free_request(struct sba_device *sba,
			      struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_FREE;
	list_move_tail(&req->node, &sba->reqs_free_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

static void sba_free_chained_requests(struct sba_request *req)
{
	unsigned long flags;
	struct sba_request *nreq;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	_sba_free_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_free_request(sba, nreq);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_chain_request(struct sba_request *first,
			      struct sba_request *req)
{
	unsigned long flags;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_add_tail(&req->next, &first->next);
	req->first = first;
	atomic_inc(&first->next_pending_count);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
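
/*
 * Chaining note: a chain of requests shares the async_tx cookie of the
 * first request. first->next_pending_count starts at 1 in
 * sba_alloc_request() and is incremented once per chained request, so
 * sba_process_received_request() completes the cookie only after the
 * last member of the chain has been received back from the mailbox.
 */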

static void sba_cleanup_nonpending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all allocated requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
		_sba_free_request(sba, req);

	/* Set all active requests as aborted */
	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
		_sba_abort_request(sba, req);

	/*
	 * Note: We expect that aborted requests will eventually
	 * be freed by sba_receive_message()
	 */

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_cleanup_pending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all pending requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
		_sba_free_request(sba, req);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
{
	int ret = 0;

	/* Send message for the request */
	req->msg.error = 0;
	ret = mbox_send_message(sba->mchan, &req->msg);
	if (ret < 0) {
		dev_err(sba->dev, "send message failed with error %d", ret);
		return ret;
	}

	/* Check error returned by mailbox controller */
	ret = req->msg.error;
	if (ret < 0)
		dev_err(sba->dev, "message error %d", ret);

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(sba->mchan, ret);

	return ret;
}
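
/*
 * The mailbox client is configured in sba_probe() with tx_block == false
 * and knows_txdone == true, so mbox_send_message() does not sleep and
 * the mbox_client_txdone() call above is what tells the mailbox
 * framework that it may submit the next message.
 */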

/* Note: Must be called with sba->reqs_lock held */
static void _sba_process_pending_requests(struct sba_device *sba)
{
	int ret;
	u32 count;
	struct sba_request *req;

	/* Process a few pending requests */
	count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
	while (!list_empty(&sba->reqs_pending_list) && count) {
		/* Get the first pending request */
		req = list_first_entry(&sba->reqs_pending_list,
				       struct sba_request, node);

		/* Try to make request active */
		if (!_sba_active_request(sba, req))
			break;

		/* Send request to mailbox channel */
		ret = sba_send_mbox_request(sba, req);
		if (ret < 0) {
			_sba_pending_request(sba, req);
			break;
		}

		count--;
	}
}

static void sba_process_received_request(struct sba_device *sba,
					 struct sba_request *req)
{
	unsigned long flags;
	struct dma_async_tx_descriptor *tx;
	struct sba_request *nreq, *first = req->first;

	/* Process only after all chained requests are received */
	if (!atomic_dec_return(&first->next_pending_count)) {
		tx = &first->tx;

		WARN_ON(tx->cookie < 0);
		if (tx->cookie > 0) {
			spin_lock_irqsave(&sba->reqs_lock, flags);
			dma_cookie_complete(tx);
			spin_unlock_irqrestore(&sba->reqs_lock, flags);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			dma_descriptor_unmap(tx);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		dma_run_dependencies(tx);

		spin_lock_irqsave(&sba->reqs_lock, flags);

		/* Free all requests chained to first request */
		list_for_each_entry(nreq, &first->next, next)
			_sba_free_request(sba, nreq);
		INIT_LIST_HEAD(&first->next);

		/* Free the first request */
		_sba_free_request(sba, first);

		/* Process pending requests */
		_sba_process_pending_requests(sba);

		spin_unlock_irqrestore(&sba->reqs_lock, flags);
	}
}

static void sba_write_stats_in_seqfile(struct sba_device *sba,
				       struct seq_file *file)
{
	unsigned long flags;
	struct sba_request *req;
	u32 free_count = 0, alloced_count = 0;
	u32 pending_count = 0, active_count = 0, aborted_count = 0;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_for_each_entry(req, &sba->reqs_free_list, node)
		if (async_tx_test_ack(&req->tx))
			free_count++;

	list_for_each_entry(req, &sba->reqs_alloc_list, node)
		alloced_count++;

	list_for_each_entry(req, &sba->reqs_pending_list, node)
		pending_count++;

	list_for_each_entry(req, &sba->reqs_active_list, node)
		active_count++;

	list_for_each_entry(req, &sba->reqs_aborted_list, node)
		aborted_count++;

	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	seq_printf(file, "maximum requests = %d\n", sba->max_req);
	seq_printf(file, "free requests = %d\n", free_count);
	seq_printf(file, "alloced requests = %d\n", alloced_count);
	seq_printf(file, "pending requests = %d\n", pending_count);
	seq_printf(file, "active requests = %d\n", active_count);
	seq_printf(file, "aborted requests = %d\n", aborted_count);
}

/* ====== DMAENGINE callbacks ===== */

static void sba_free_chan_resources(struct dma_chan *dchan)
{
	/*
	 * Channel resources are pre-allocated, so we just free up
	 * whatever we can so that the pre-allocated channel resources
	 * can be re-used next time.
	 */
	sba_cleanup_nonpending_requests(to_sba_device(dchan));
}

static int sba_device_terminate_all(struct dma_chan *dchan)
{
	/* Cleanup all pending requests */
	sba_cleanup_pending_requests(to_sba_device(dchan));

	return 0;
}

static void sba_issue_pending(struct dma_chan *dchan)
{
	unsigned long flags;
	struct sba_device *sba = to_sba_device(dchan);

	/* Process pending requests */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	_sba_process_pending_requests(sba);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	dma_cookie_t cookie;
	struct sba_device *sba;
	struct sba_request *req, *nreq;

	if (unlikely(!tx))
		return -EINVAL;

	sba = to_sba_device(tx->chan);
	req = to_sba_request(tx);

	/* Assign cookie and mark all chained requests pending */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	cookie = dma_cookie_assign(tx);
	_sba_pending_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_pending_request(sba, nreq);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return cookie;
}

static enum dma_status sba_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;
	struct sba_device *sba = to_sba_device(dchan);

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	mbox_client_peek_data(sba->mchan);

	return dma_cookie_status(dchan, cookie, txstate);
}

static void sba_fillup_interrupt_msg(struct sba_request *req,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load dummy data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Type-A command to write buf0 to dummy location */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct sba_request *req = NULL;
	struct sba_device *sba = to_sba_device(dchan);

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;

	/*
	 * Force fence so that no requests are submitted
	 * until DMA callback for this request is invoked.
	 */
	req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request message */
	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return &req->tx;
}

static void sba_fillup_memcpy_msg(struct sba_request *req,
				  struct brcm_sba_command *cmds,
				  struct brcm_message *msg,
				  dma_addr_t msg_offset, size_t msg_len,
				  dma_addr_t dst, dma_addr_t src)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_memcpy_req(struct sba_device *sba,
			dma_addr_t off, dma_addr_t dst, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request message */
	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
			      off, len, dst, src);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
					      req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
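
/*
 * A minimal sketch of how a dmaengine client would drive this memcpy
 * path (standard dmaengine API; chan, dst, src and len are assumed to
 * be set up by the caller):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);	// ends up in sba_tx_submit()
 *		dma_async_issue_pending(chan);	// ends up in sba_issue_pending()
 *	}
 *
 * Lengths larger than hw_buf_size come back as a chain of requests
 * behind a single descriptor, as built by the loop above.
 */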

static void sba_fillup_xor_msg(struct sba_request *req,
			       struct brcm_sba_command *cmds,
			       struct brcm_message *msg,
			       dma_addr_t msg_offset, size_t msg_len,
			       dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	unsigned int i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src[0] + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-B commands to xor data with buf0 and put it back in buf0 */
	for (i = 1; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
		     dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
		     u32 src_cnt, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request message */
	sba_fillup_xor_msg(req, req->cmds, &req->msg,
			   off, len, dst, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
		 u32 src_cnt, size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_xor_srcs))
		return NULL;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
					   req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}

static void sba_fillup_pq_msg(struct sba_request *req,
			      bool pq_continue,
			      struct brcm_sba_command *cmds,
			      struct brcm_message *msg,
			      dma_addr_t msg_offset, size_t msg_len,
			      dma_addr_t *dst_p, dma_addr_t *dst_q,
			      const u8 *scf, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	unsigned int i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		if (dst_p) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(0);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_p + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}

		/* Type-B command to load old Q into buf1 */
		if (dst_q) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(1);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_q + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}
	} else {
		/* Type-A command to zero all buffers */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;
	}

	/* Type-B commands to generate P into buf0 and Q into buf1 */
	for (i = 0; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	if (dst_p) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf1 */
	if (dst_q) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
		    dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
		    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request messages */
	sba_fillup_pq_msg(req, dmaf_continue(flags),
			  req->cmds, &req->msg,
			  off, len, dst_p, dst_q, scf, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static void sba_fillup_pq_single_msg(struct sba_request *req,
				     bool pq_continue,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg,
				     dma_addr_t msg_offset, size_t msg_len,
				     dma_addr_t *dst_p, dma_addr_t *dst_q,
				     dma_addr_t src, u8 scf)
{
	u64 cmd;
	u32 c_mdata;
	u8 pos, dpos = raid6_gflog[scf];
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (!dst_p)
		goto skip_p;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;

		/*
		 * Type-B command to xor data with buf0 and put it
		 * back in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	} else {
		/* Type-B command to load data into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_p + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_p:
	if (!dst_q)
		goto skip_q;

	/* Type-A command to zero all buffers */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	cmdsp++;

	if (dpos == 255)
		goto skip_q_computation;
	pos = (dpos < req->sba->max_pq_coefs) ?
		dpos : (req->sba->max_pq_coefs - 1);

	/*
	 * Type-B command to generate initial Q from data
	 * and store output into buf0
	 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
			  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	dpos -= pos;

	/* Multiple Type-A commands to generate final Q */
	while (dpos) {
		pos = (dpos < req->sba->max_pq_coefs) ?
			dpos : (req->sba->max_pq_coefs - 1);

		/*
		 * Type-A command to generate Q with buf0 and
		 * buf1, storing the result in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;

		dpos -= pos;
	}

skip_q_computation:
	if (pq_continue) {
		/*
		 * Type-B command to XOR previous output with
		 * buf0 and write it into buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_q + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_q:
	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
			   dma_addr_t *dst_p, dma_addr_t *dst_q,
			   dma_addr_t src, u8 scf, size_t len,
			   unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request messages */
	sba_fillup_pq_single_msg(req, dmaf_continue(flags),
				 req->cmds, &req->msg, off, len,
				 dst_p, dst_q, src, scf);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
		u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	u32 i, dst_q_index;
	size_t req_len;
	bool slow = false;
	dma_addr_t off = 0;
	dma_addr_t *dst_p = NULL, *dst_q = NULL;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_pq_srcs))
		return NULL;
	for (i = 0; i < src_cnt; i++)
		if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
			slow = true;

	/* Figure-out P and Q destination addresses */
	if (!(flags & DMA_PREP_PQ_DISABLE_P))
		dst_p = &dst[0];
	if (!(flags & DMA_PREP_PQ_DISABLE_Q))
		dst_q = &dst[1];

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		if (slow) {
			dst_q_index = src_cnt;

			if (dst_q) {
				for (i = 0; i < src_cnt; i++) {
					if (*dst_q == src[i]) {
						dst_q_index = i;
						break;
					}
				}
			}

			if (dst_q_index < src_cnt) {
				i = dst_q_index;
				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}

			for (i = 0; i < src_cnt; i++) {
				if (dst_q_index == i)
					continue;

				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}
		} else {
			req = sba_prep_dma_pq_req(sba, off,
						  dst_p, dst_q, src, src_cnt,
						  scf, req_len, flags);
			if (!req)
				goto fail;

			if (first)
				sba_chain_request(first, req);
			else
				first = req;
		}

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;

fail:
	if (first)
		sba_free_chained_requests(first);
	return NULL;
}

/* ====== Mailbox callbacks ===== */

static void sba_receive_message(struct mbox_client *cl, void *msg)
{
	struct brcm_message *m = msg;
	struct sba_request *req = m->ctx;
	struct sba_device *sba = req->sba;

	/* Report an error if the message carries one */
	if (m->error < 0)
		dev_err(sba->dev, "%s got message with error %d",
			dma_chan_name(&sba->dma_chan), m->error);

	/* Process received request */
	sba_process_received_request(sba, req);
}

/* ====== Debugfs callbacks ====== */

static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
	struct sba_device *sba = dev_get_drvdata(file->private);

	/* Write stats in file */
	sba_write_stats_in_seqfile(sba, file);

	return 0;
}

/* ====== Platform driver routines ===== */

static int sba_prealloc_channel_resources(struct sba_device *sba)
{
	int i, j, ret = 0;
	struct sba_request *req = NULL;

	sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_resp_pool_size,
					    &sba->resp_dma_base, GFP_KERNEL);
	if (!sba->resp_base)
		return -ENOMEM;

	sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_cmds_pool_size,
					    &sba->cmds_dma_base, GFP_KERNEL);
	if (!sba->cmds_base) {
		ret = -ENOMEM;
		goto fail_free_resp_pool;
	}

	spin_lock_init(&sba->reqs_lock);
	sba->reqs_fence = false;
	INIT_LIST_HEAD(&sba->reqs_alloc_list);
	INIT_LIST_HEAD(&sba->reqs_pending_list);
	INIT_LIST_HEAD(&sba->reqs_active_list);
	INIT_LIST_HEAD(&sba->reqs_aborted_list);
	INIT_LIST_HEAD(&sba->reqs_free_list);

	for (i = 0; i < sba->max_req; i++) {
		req = devm_kzalloc(sba->dev,
				   struct_size(req, cmds, sba->max_cmd_per_req),
				   GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto fail_free_cmds_pool;
		}
		INIT_LIST_HEAD(&req->node);
		req->sba = sba;
		req->flags = SBA_REQUEST_STATE_FREE;
		INIT_LIST_HEAD(&req->next);
		atomic_set(&req->next_pending_count, 0);
		for (j = 0; j < sba->max_cmd_per_req; j++) {
			req->cmds[j].cmd = 0;
			req->cmds[j].cmd_dma = sba->cmds_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].flags = 0;
		}
		memset(&req->msg, 0, sizeof(req->msg));
		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
		async_tx_ack(&req->tx);
		req->tx.tx_submit = sba_tx_submit;
		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
		list_add_tail(&req->node, &sba->reqs_free_list);
	}

	return 0;

fail_free_cmds_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	return ret;
}

static void sba_freeup_channel_resources(struct sba_device *sba)
{
	dmaengine_terminate_all(&sba->dma_chan);
	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	sba->resp_base = NULL;
	sba->resp_dma_base = 0;
}

static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set mailbox channel device as the base device of
	 * our dma_device because the actual memory accesses
	 * will be done by the mailbox controller
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base prep routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}

static int sba_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of mailbox channels should be at least 1 */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We cannot support max_pq_srcs == max_pq_coefs because
		 * we are limited by the number of SBA commands that we
		 * can fit in one message for the underlying ring manager
		 * HW.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);
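
	/*
	 * Worked example for SBA_VER_2 (values from the switch above):
	 * max_pq_srcs = 12 gives max_cmd_per_req = 15 and
	 * max_xor_srcs = 14; with max_req = 8192 and hw_resp_size = 8,
	 * the response pool is 8192 * 8 = 64 KiB and the command pool
	 * is 8192 * 15 * sizeof(u64) = 960 KiB, both allocated from the
	 * mailbox device in sba_prealloc_channel_resources().
	 */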

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = true;
	sba->client.tx_tout = 0;

	/* Request mailbox channel */
	sba->mchan = mbox_request_channel(&sba->client, 0);
	if (IS_ERR(sba->mchan))
		return PTR_ERR(sba->mchan);

	/* Find out the underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchan;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchan;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* Preallocate channel resources */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchan;

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);

	/* Create debugfs stats entry */
	debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
				    sba_debugfs_stats_show);

skip_debugfs:

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 dev_name(sba->mbox_dev));

	return 0;

fail_free_resources:
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
fail_free_mchan:
	mbox_free_channel(sba->mchan);
	return ret;
}

static void sba_remove(struct platform_device *pdev)
{
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	debugfs_remove_recursive(sba->root);

	sba_freeup_channel_resources(sba);

	mbox_free_channel(sba->mchan);
}

static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove_new = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0-only
2// Copyright (C) 2017 Broadcom
3
4/*
5 * Broadcom SBA RAID Driver
6 *
7 * The Broadcom stream buffer accelerator (SBA) provides offloading
8 * capabilities for RAID operations. The SBA offload engine is accessible
9 * via Broadcom SoC specific ring manager. Two or more offload engines
10 * can share same Broadcom SoC specific ring manager due to this Broadcom
11 * SoC specific ring manager driver is implemented as a mailbox controller
12 * driver and offload engine drivers are implemented as mallbox clients.
13 *
14 * Typically, Broadcom SoC specific ring manager will implement larger
15 * number of hardware rings over one or more SBA hardware devices. By
16 * design, the internal buffer size of SBA hardware device is limited
17 * but all offload operations supported by SBA can be broken down into
18 * multiple small size requests and executed parallely on multiple SBA
19 * hardware devices for achieving high through-put.
20 *
21 * The Broadcom SBA RAID driver does not require any register programming
22 * except submitting request to SBA hardware device via mailbox channels.
23 * This driver implements a DMA device with one DMA channel using a single
24 * mailbox channel provided by Broadcom SoC specific ring manager driver.
25 * For having more SBA DMA channels, we can create more SBA device nodes
26 * in Broadcom SoC specific DTS based on number of hardware rings supported
27 * by Broadcom SoC ring manager.
28 */
29
30#include <linux/bitops.h>
31#include <linux/debugfs.h>
32#include <linux/dma-mapping.h>
33#include <linux/dmaengine.h>
34#include <linux/list.h>
35#include <linux/mailbox_client.h>
36#include <linux/mailbox/brcm-message.h>
37#include <linux/module.h>
38#include <linux/of_device.h>
39#include <linux/slab.h>
40#include <linux/raid/pq.h>
41
42#include "dmaengine.h"
43
44/* ====== Driver macros and defines ===== */
45
46#define SBA_TYPE_SHIFT 48
47#define SBA_TYPE_MASK GENMASK(1, 0)
48#define SBA_TYPE_A 0x0
49#define SBA_TYPE_B 0x2
50#define SBA_TYPE_C 0x3
51#define SBA_USER_DEF_SHIFT 32
52#define SBA_USER_DEF_MASK GENMASK(15, 0)
53#define SBA_R_MDATA_SHIFT 24
54#define SBA_R_MDATA_MASK GENMASK(7, 0)
55#define SBA_C_MDATA_MS_SHIFT 18
56#define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
57#define SBA_INT_SHIFT 17
58#define SBA_INT_MASK BIT(0)
59#define SBA_RESP_SHIFT 16
60#define SBA_RESP_MASK BIT(0)
61#define SBA_C_MDATA_SHIFT 8
62#define SBA_C_MDATA_MASK GENMASK(7, 0)
63#define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
64#define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
65#define SBA_C_MDATA_DNUM_SHIFT 5
66#define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
67#define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
68#define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
69#define SBA_CMD_SHIFT 0
70#define SBA_CMD_MASK GENMASK(3, 0)
71#define SBA_CMD_ZERO_BUFFER 0x4
72#define SBA_CMD_ZERO_ALL_BUFFERS 0x8
73#define SBA_CMD_LOAD_BUFFER 0x9
74#define SBA_CMD_XOR 0xa
75#define SBA_CMD_GALOIS_XOR 0xb
76#define SBA_CMD_WRITE_BUFFER 0xc
77#define SBA_CMD_GALOIS 0xe
78
79#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
80#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
81
82/* Driver helper macros */
83#define to_sba_request(tx) \
84 container_of(tx, struct sba_request, tx)
85#define to_sba_device(dchan) \
86 container_of(dchan, struct sba_device, dma_chan)
87
88/* ===== Driver data structures ===== */
89
enum sba_request_flags {
	SBA_REQUEST_STATE_FREE		= 0x001,
	SBA_REQUEST_STATE_ALLOCED	= 0x002,
	SBA_REQUEST_STATE_PENDING	= 0x004,
	SBA_REQUEST_STATE_ACTIVE	= 0x008,
	SBA_REQUEST_STATE_ABORTED	= 0x010,
	SBA_REQUEST_STATE_MASK		= 0x0ff,
	SBA_REQUEST_FENCE		= 0x100,
};
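
/*
 * The state values above are one-hot bits within SBA_REQUEST_STATE_MASK,
 * so a request is in exactly one state at any time, while
 * SBA_REQUEST_FENCE lives outside the state mask and can be combined
 * with any state. For example:
 *
 *	req->flags = SBA_REQUEST_STATE_ACTIVE | SBA_REQUEST_FENCE;
 *	state = req->flags & SBA_REQUEST_STATE_MASK;	-> 0x008
 */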

struct sba_request {
	/* Global state */
	struct list_head node;
	struct sba_device *sba;
	u32 flags;
	/* Chained requests management */
	struct sba_request *first;
	struct list_head next;
	atomic_t next_pending_count;
	/* BRCM message data */
	struct brcm_message msg;
	struct dma_async_tx_descriptor tx;
	/* SBA commands */
	struct brcm_sba_command cmds[];
};

enum sba_version {
	SBA_VER_1 = 0,
	SBA_VER_2
};

struct sba_device {
	/* Underlying device */
	struct device *dev;
	/* DT configuration parameters */
	enum sba_version ver;
	/* Derived configuration parameters */
	u32 max_req;
	u32 hw_buf_size;
	u32 hw_resp_size;
	u32 max_pq_coefs;
	u32 max_pq_srcs;
	u32 max_cmd_per_req;
	u32 max_xor_srcs;
	u32 max_resp_pool_size;
	u32 max_cmds_pool_size;
	/* Mailbox client and mailbox channel */
	struct mbox_client client;
	struct mbox_chan *mchan;
	struct device *mbox_dev;
	/* DMA device and DMA channel */
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	/* DMA channel resources */
	void *resp_base;
	dma_addr_t resp_dma_base;
	void *cmds_base;
	dma_addr_t cmds_dma_base;
	spinlock_t reqs_lock;
	bool reqs_fence;
	struct list_head reqs_alloc_list;
	struct list_head reqs_pending_list;
	struct list_head reqs_active_list;
	struct list_head reqs_aborted_list;
	struct list_head reqs_free_list;
	/* DebugFS directory entries */
	struct dentry *root;
};

/* ====== Command helper routines ===== */

static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
	cmd &= ~((u64)mask << shift);
	cmd |= ((u64)(val & mask) << shift);
	return cmd;
}
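
/*
 * A worked example (values from the defines above): encoding the type
 * field of a command word with
 *
 *	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 *
 * masks SBA_TYPE_B (0x2) with GENMASK(1, 0) and shifts it to bits 49:48,
 * yielding cmd == 0x0002000000000000ULL. The fillup routines below build
 * every command word from a sequence of such calls.
 */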

static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
}

static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
	       ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}

/* ====== General helper routines ===== */

static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
	bool found = false;
	unsigned long flags;
	struct sba_request *req = NULL;

	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(req, &sba->reqs_free_list, node) {
		if (async_tx_test_ack(&req->tx)) {
			list_move_tail(&req->node, &sba->reqs_alloc_list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	if (!found) {
		/*
		 * We have no more free requests, so we peek the
		 * mailbox channel hoping that a few active requests
		 * have completed, which would create more room for
		 * new requests.
		 */
		mbox_client_peek_data(sba->mchan);
		return NULL;
	}

	req->flags = SBA_REQUEST_STATE_ALLOCED;
	req->first = req;
	INIT_LIST_HEAD(&req->next);
	atomic_set(&req->next_pending_count, 1);

	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	async_tx_ack(&req->tx);

	return req;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_pending_request(struct sba_device *sba,
				 struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_PENDING;
	list_move_tail(&req->node, &sba->reqs_pending_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static bool _sba_active_request(struct sba_device *sba,
				struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
	if (sba->reqs_fence)
		return false;
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ACTIVE;
	list_move_tail(&req->node, &sba->reqs_active_list);
	if (req->flags & SBA_REQUEST_FENCE)
		sba->reqs_fence = true;
	return true;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_abort_request(struct sba_device *sba,
			       struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ABORTED;
	list_move_tail(&req->node, &sba->reqs_aborted_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_free_request(struct sba_device *sba,
			      struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_FREE;
	list_move_tail(&req->node, &sba->reqs_free_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}
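
/*
 * A summary of the request lifecycle implemented by the helpers above
 * (derived from their callers below):
 *
 *	FREE --sba_alloc_request()--> ALLOCED --sba_tx_submit()--> PENDING
 *	PENDING --_sba_active_request()--> ACTIVE
 *	ACTIVE --sba_process_received_request()--> FREE
 *	ACTIVE --sba_cleanup_nonpending_requests()--> ABORTED --> FREE
 */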

static void sba_free_chained_requests(struct sba_request *req)
{
	unsigned long flags;
	struct sba_request *nreq;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	_sba_free_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_free_request(sba, nreq);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_chain_request(struct sba_request *first,
			      struct sba_request *req)
{
	unsigned long flags;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_add_tail(&req->next, &first->next);
	req->first = first;
	atomic_inc(&first->next_pending_count);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_cleanup_nonpending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all allocated requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
		_sba_free_request(sba, req);

	/* Set all active requests as aborted */
	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
		_sba_abort_request(sba, req);

	/*
	 * Note: We expect that aborted requests will eventually be
	 * freed by sba_receive_message()
	 */

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_cleanup_pending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all pending requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
		_sba_free_request(sba, req);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
{
	int ret = 0;

	/* Send message for the request */
	req->msg.error = 0;
	ret = mbox_send_message(sba->mchan, &req->msg);
	if (ret < 0) {
		dev_err(sba->dev, "send message failed with error %d", ret);
		return ret;
	}

	/* Check error returned by mailbox controller */
	ret = req->msg.error;
	if (ret < 0)
		dev_err(sba->dev, "message error %d", ret);

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(sba->mchan, ret);

	return ret;
}
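
/*
 * Note: the mailbox client is configured with knows_txdone set (see
 * sba_probe() below), so sba_send_mbox_request() must itself report TX
 * completion via mbox_client_txdone() instead of relying on a TX-done
 * interrupt from the mailbox controller.
 */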

/* Note: Must be called with sba->reqs_lock held */
static void _sba_process_pending_requests(struct sba_device *sba)
{
	int ret;
	u32 count;
	struct sba_request *req;

	/* Process a few pending requests */
	count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
	while (!list_empty(&sba->reqs_pending_list) && count) {
		/* Get the first pending request */
		req = list_first_entry(&sba->reqs_pending_list,
				       struct sba_request, node);

		/* Try to make request active */
		if (!_sba_active_request(sba, req))
			break;

		/* Send request to mailbox channel */
		ret = sba_send_mbox_request(sba, req);
		if (ret < 0) {
			_sba_pending_request(sba, req);
			break;
		}

		count--;
	}
}

static void sba_process_received_request(struct sba_device *sba,
					 struct sba_request *req)
{
	unsigned long flags;
	struct dma_async_tx_descriptor *tx;
	struct sba_request *nreq, *first = req->first;

	/* Process only after all chained requests are received */
	if (!atomic_dec_return(&first->next_pending_count)) {
		tx = &first->tx;

		WARN_ON(tx->cookie < 0);
		if (tx->cookie > 0) {
			spin_lock_irqsave(&sba->reqs_lock, flags);
			dma_cookie_complete(tx);
			spin_unlock_irqrestore(&sba->reqs_lock, flags);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			dma_descriptor_unmap(tx);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		dma_run_dependencies(tx);

		spin_lock_irqsave(&sba->reqs_lock, flags);

		/* Free all requests chained to first request */
		list_for_each_entry(nreq, &first->next, next)
			_sba_free_request(sba, nreq);
		INIT_LIST_HEAD(&first->next);

		/* Free the first request */
		_sba_free_request(sba, first);

		/* Process pending requests */
		_sba_process_pending_requests(sba);

		spin_unlock_irqrestore(&sba->reqs_lock, flags);
	}
}
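
/*
 * A rough sketch of chained completion: every sub-request holds one
 * reference on first->next_pending_count, so for a chain
 * first -> r1 -> r2 (names made up here) the cookie completes only on
 * the last of the three calls, regardless of arrival order:
 *
 *	sba_process_received_request(sba, r1);    - count 3 -> 2
 *	sba_process_received_request(sba, r2);    - count 2 -> 1
 *	sba_process_received_request(sba, first); - count 1 -> 0, complete
 */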

static void sba_write_stats_in_seqfile(struct sba_device *sba,
				       struct seq_file *file)
{
	unsigned long flags;
	struct sba_request *req;
	u32 free_count = 0, alloced_count = 0;
	u32 pending_count = 0, active_count = 0, aborted_count = 0;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_for_each_entry(req, &sba->reqs_free_list, node)
		if (async_tx_test_ack(&req->tx))
			free_count++;

	list_for_each_entry(req, &sba->reqs_alloc_list, node)
		alloced_count++;

	list_for_each_entry(req, &sba->reqs_pending_list, node)
		pending_count++;

	list_for_each_entry(req, &sba->reqs_active_list, node)
		active_count++;

	list_for_each_entry(req, &sba->reqs_aborted_list, node)
		aborted_count++;

	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	seq_printf(file, "maximum requests = %d\n", sba->max_req);
	seq_printf(file, "free requests = %d\n", free_count);
	seq_printf(file, "alloced requests = %d\n", alloced_count);
	seq_printf(file, "pending requests = %d\n", pending_count);
	seq_printf(file, "active requests = %d\n", active_count);
	seq_printf(file, "aborted requests = %d\n", aborted_count);
}
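
/*
 * The resulting debugfs "stats" file looks like this (hypothetical
 * counts, with max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL):
 *
 *	maximum requests = 8192
 *	free requests = 8190
 *	alloced requests = 0
 *	pending requests = 0
 *	active requests = 2
 *	aborted requests = 0
 */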

/* ====== DMAENGINE callbacks ===== */

static void sba_free_chan_resources(struct dma_chan *dchan)
{
	/*
	 * Channel resources are pre-allocated, so we just free up
	 * whatever we can and reuse the pre-allocated channel
	 * resources next time.
	 */
	sba_cleanup_nonpending_requests(to_sba_device(dchan));
}

static int sba_device_terminate_all(struct dma_chan *dchan)
{
	/* Cleanup all pending requests */
	sba_cleanup_pending_requests(to_sba_device(dchan));

	return 0;
}

static void sba_issue_pending(struct dma_chan *dchan)
{
	unsigned long flags;
	struct sba_device *sba = to_sba_device(dchan);

	/* Process pending requests */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	_sba_process_pending_requests(sba);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	dma_cookie_t cookie;
	struct sba_device *sba;
	struct sba_request *req, *nreq;

	if (unlikely(!tx))
		return -EINVAL;

	sba = to_sba_device(tx->chan);
	req = to_sba_request(tx);

	/* Assign cookie and mark all chained requests pending */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	cookie = dma_cookie_assign(tx);
	_sba_pending_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_pending_request(sba, nreq);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return cookie;
}

static enum dma_status sba_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;
	struct sba_device *sba = to_sba_device(dchan);

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	mbox_client_peek_data(sba->mchan);

	return dma_cookie_status(dchan, cookie, txstate);
}
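
/*
 * Completions arrive through the mailbox rx callback, so before giving
 * up sba_tx_status() uses mbox_client_peek_data() to ask the mailbox
 * controller to check for received messages right away, and then looks
 * at the cookie a second time.
 */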

static void sba_fillup_interrupt_msg(struct sba_request *req,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load dummy data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Type-A command to write buf0 to dummy location */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Fill in the brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct sba_request *req = NULL;
	struct sba_device *sba = to_sba_device(dchan);

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;

	/*
	 * Force fence so that no requests are submitted
	 * until the DMA callback for this request is invoked.
	 */
	req->flags |= SBA_REQUEST_FENCE;

	/* Fill in the request message */
	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return &req->tx;
}

static void sba_fillup_memcpy_msg(struct sba_request *req,
				  struct brcm_sba_command *cmds,
				  struct brcm_message *msg,
				  dma_addr_t msg_offset, size_t msg_len,
				  dma_addr_t dst, dma_addr_t src)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fill in the brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_memcpy_req(struct sba_device *sba,
			dma_addr_t off, dma_addr_t dst, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill in the request message */
	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
			      off, len, dst, src);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
					      req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
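
/*
 * A worked example (with hw_buf_size == 4096 as set in sba_probe()
 * below): a 10000-byte memcpy is built as a chain of three requests
 * covering 4096 + 4096 + 1808 bytes, where the second and third
 * requests are chained to the first via sba_chain_request().
 */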

static void sba_fillup_xor_msg(struct sba_request *req,
			       struct brcm_sba_command *cmds,
			       struct brcm_message *msg,
			       dma_addr_t msg_offset, size_t msg_len,
			       dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	unsigned int i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src[0] + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-B commands to XOR data with buf0 and put it back in buf0 */
	for (i = 1; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fill in the brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
		     dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
		     u32 src_cnt, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill in the request message */
	sba_fillup_xor_msg(req, req->cmds, &req->msg,
			   off, len, dst, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
		 u32 src_cnt, size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_xor_srcs))
		return NULL;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
					   req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
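
/*
 * The src_cnt limit follows from the per-message command budget: an XOR
 * request needs one LOAD_BUFFER, (src_cnt - 1) XOR and one WRITE_BUFFER
 * command, i.e. src_cnt + 1 commands in total. This is why sba_probe()
 * below derives max_xor_srcs = max_cmd_per_req - 1 (14 for SBA_VER_2,
 * where max_cmd_per_req = 12 + 3 = 15).
 */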

static void sba_fillup_pq_msg(struct sba_request *req,
			      bool pq_continue,
			      struct brcm_sba_command *cmds,
			      struct brcm_message *msg,
			      dma_addr_t msg_offset, size_t msg_len,
			      dma_addr_t *dst_p, dma_addr_t *dst_q,
			      const u8 *scf, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	unsigned int i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		if (dst_p) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(0);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_p + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}

		/* Type-B command to load old Q into buf1 */
		if (dst_q) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(1);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_q + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}
	} else {
		/* Type-A command to zero all buffers */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;
	}

	/* Type-B commands to generate P into buf0 and Q into buf1 */
	for (i = 0; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	if (dst_p) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf1 */
	if (dst_q) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Fill in the brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
		    dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
		    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill in the request message */
	sba_fillup_pq_msg(req, dmaf_continue(flags),
			  req->cmds, &req->msg,
			  off, len, dst_p, dst_q, scf, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static void sba_fillup_pq_single_msg(struct sba_request *req,
				     bool pq_continue,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg,
				     dma_addr_t msg_offset, size_t msg_len,
				     dma_addr_t *dst_p, dma_addr_t *dst_q,
				     dma_addr_t src, u8 scf)
{
	u64 cmd;
	u32 c_mdata;
	u8 pos, dpos = raid6_gflog[scf];
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (!dst_p)
		goto skip_p;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;

		/*
		 * Type-B command to XOR data with buf0 and put it
		 * back in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	} else {
		/* Type-B command to load data into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_p + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_p:
	if (!dst_q)
		goto skip_q;

	/* Type-A command to zero all buffers */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	cmdsp++;

	if (dpos == 255)
		goto skip_q_computation;
	pos = (dpos < req->sba->max_pq_coefs) ?
		dpos : (req->sba->max_pq_coefs - 1);

	/*
	 * Type-B command to generate initial Q from data
	 * and store output into buf0
	 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
			  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	dpos -= pos;

	/* Multiple Type-A commands to generate final Q */
	while (dpos) {
		pos = (dpos < req->sba->max_pq_coefs) ?
			dpos : (req->sba->max_pq_coefs - 1);

		/*
		 * Type-A command to generate Q with buf0 and
		 * buf1, storing the result in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;

		dpos -= pos;
	}

skip_q_computation:
	if (pq_continue) {
		/*
		 * Type-B command to XOR previous output with
		 * buf0 and write it into buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_q + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_q:
	/* Fill in the brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
			   dma_addr_t *dst_p, dma_addr_t *dst_q,
			   dma_addr_t src, u8 scf, size_t len,
			   unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill in the request message */
	sba_fillup_pq_single_msg(req, dmaf_continue(flags),
				 req->cmds, &req->msg, off, len,
				 dst_p, dst_q, src, scf);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
		u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	u32 i, dst_q_index;
	size_t req_len;
	bool slow = false;
	dma_addr_t off = 0;
	dma_addr_t *dst_p = NULL, *dst_q = NULL;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_pq_srcs))
		return NULL;
	for (i = 0; i < src_cnt; i++)
		if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
			slow = true;

	/* Figure out P and Q destination addresses */
	if (!(flags & DMA_PREP_PQ_DISABLE_P))
		dst_p = &dst[0];
	if (!(flags & DMA_PREP_PQ_DISABLE_Q))
		dst_q = &dst[1];

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		if (slow) {
			dst_q_index = src_cnt;

			if (dst_q) {
				for (i = 0; i < src_cnt; i++) {
					if (*dst_q == src[i]) {
						dst_q_index = i;
						break;
					}
				}
			}

			if (dst_q_index < src_cnt) {
				i = dst_q_index;
				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}

			for (i = 0; i < src_cnt; i++) {
				if (dst_q_index == i)
					continue;

				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}
		} else {
			req = sba_prep_dma_pq_req(sba, off,
						  dst_p, dst_q, src, src_cnt,
						  scf, req_len, flags);
			if (!req)
				goto fail;

			if (first)
				sba_chain_request(first, req);
			else
				first = req;
		}

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;

fail:
	if (first)
		sba_free_chained_requests(first);
	return NULL;
}
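
/*
 * A worked example of the slow path above: with max_pq_coefs == 30
 * (SBA_VER_2), a source whose coefficient has raid6_gflog[scf[i]] == 37
 * cannot be applied by a single GALOIS command, so
 * sba_fillup_pq_single_msg() splits the exponent as 37 = 29 + 8: one
 * Type-B GALOIS with pos == 29 followed by one Type-A GALOIS with
 * pos == 8 multiplying the intermediate result.
 */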

/* ====== Mailbox callbacks ===== */

static void sba_receive_message(struct mbox_client *cl, void *msg)
{
	struct brcm_message *m = msg;
	struct sba_request *req = m->ctx;
	struct sba_device *sba = req->sba;

	/* Report an error if the message failed */
	if (m->error < 0)
		dev_err(sba->dev, "%s got message with error %d",
			dma_chan_name(&sba->dma_chan), m->error);

	/* Process received request */
	sba_process_received_request(sba, req);
}

/* ====== Debugfs callbacks ====== */

static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
	struct sba_device *sba = dev_get_drvdata(file->private);

	/* Write stats in file */
	sba_write_stats_in_seqfile(sba, file);

	return 0;
}

/* ====== Platform driver routines ===== */

static int sba_prealloc_channel_resources(struct sba_device *sba)
{
	int i, j, ret = 0;
	struct sba_request *req = NULL;

	sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_resp_pool_size,
					    &sba->resp_dma_base, GFP_KERNEL);
	if (!sba->resp_base)
		return -ENOMEM;

	sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_cmds_pool_size,
					    &sba->cmds_dma_base, GFP_KERNEL);
	if (!sba->cmds_base) {
		ret = -ENOMEM;
		goto fail_free_resp_pool;
	}

	spin_lock_init(&sba->reqs_lock);
	sba->reqs_fence = false;
	INIT_LIST_HEAD(&sba->reqs_alloc_list);
	INIT_LIST_HEAD(&sba->reqs_pending_list);
	INIT_LIST_HEAD(&sba->reqs_active_list);
	INIT_LIST_HEAD(&sba->reqs_aborted_list);
	INIT_LIST_HEAD(&sba->reqs_free_list);

	for (i = 0; i < sba->max_req; i++) {
		req = devm_kzalloc(sba->dev,
				   struct_size(req, cmds, sba->max_cmd_per_req),
				   GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto fail_free_cmds_pool;
		}
		INIT_LIST_HEAD(&req->node);
		req->sba = sba;
		req->flags = SBA_REQUEST_STATE_FREE;
		INIT_LIST_HEAD(&req->next);
		atomic_set(&req->next_pending_count, 0);
		for (j = 0; j < sba->max_cmd_per_req; j++) {
			req->cmds[j].cmd = 0;
			req->cmds[j].cmd_dma = sba->cmds_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].flags = 0;
		}
		memset(&req->msg, 0, sizeof(req->msg));
		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
		async_tx_ack(&req->tx);
		req->tx.tx_submit = sba_tx_submit;
		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
		list_add_tail(&req->node, &sba->reqs_free_list);
	}

	return 0;

fail_free_cmds_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	return ret;
}
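
/*
 * Pool sizing example (SBA_VER_2 numbers from sba_probe() below):
 * max_req = 8192 and hw_resp_size = 8 give a 64 KiB response pool, and
 * max_cmd_per_req = 15 gives a command pool of
 * 8192 * 15 * sizeof(u64) = 983040 bytes, carved into per-request
 * slices by the loop above.
 */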

static void sba_freeup_channel_resources(struct sba_device *sba)
{
	dmaengine_terminate_all(&sba->dma_chan);
	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	sba->resp_base = NULL;
	sba->resp_dma_base = 0;
}

static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set the mailbox channel device as the base device of
	 * our dma_device because the actual memory accesses
	 * will be done by the mailbox controller.
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base prep routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}

static int sba_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of mailbox channels should be at least 1 */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * max_pq_srcs is kept smaller than max_pq_coefs because
		 * we are limited by the number of SBA commands that we
		 * can fit in one message for the underlying ring manager
		 * HW.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = true;
	sba->client.tx_tout = 0;

	/* Request mailbox channel */
	sba->mchan = mbox_request_channel(&sba->client, 0);
	if (IS_ERR(sba->mchan))
		return PTR_ERR(sba->mchan);

	/* Find out the underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchan;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchan;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* Prealloc channel resources */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchan;

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);

	/* Create debugfs stats entry */
	debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
				    sba_debugfs_stats_show);

skip_debugfs:

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 dev_name(sba->mbox_dev));

	return 0;

fail_free_resources:
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
fail_free_mchan:
	mbox_free_channel(sba->mchan);
	return ret;
}

static int sba_remove(struct platform_device *pdev)
{
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	debugfs_remove_recursive(sba->root);

	sba_freeup_channel_resources(sba);

	mbox_free_channel(sba->mchan);

	return 0;
}

static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, sba_of_match);
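
/*
 * A hypothetical device tree node for one SBA DMA channel (the mailbox
 * specifier cells depend on the ring manager binding and are made up
 * here; only the compatible strings and the "mboxes" property are taken
 * from this driver):
 *
 *	raid0: raid-dma {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raid_mbox 0>;
 *	};
 */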

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");