// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
 * and in others the FA2/FA+ hardware is used with this PDC driver.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. The hard IRQ handler
 * only acknowledges the interrupt; response messages are handled outside of
 * interrupt context by a tasklet.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#define PDC_SUCCESS 0

#define RING_ENTRY_SIZE sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES 512
/*
 * Minimum number of ring descriptor entries that must be free to tell mailbox
 * framework that it can submit another request
 */
#define PDC_RING_SPACE_MIN 15

#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER 13
#define RING_ALIGN BIT(RING_ALIGN_ORDER)

#define RX_BUF_ALIGN_ORDER 5
#define RX_BUF_ALIGN BIT(RX_BUF_ALIGN_ORDER)

/* descriptor bumping macros */
#define XXD(x, max_mask) ((x) & (max_mask))
#define TXD(x, max_mask) XXD((x), (max_mask))
#define RXD(x, max_mask) XXD((x), (max_mask))
#define NEXTTXD(i, max_mask) TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask) TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask) RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask) RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask))
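
/*
 * The bumping macros rely on the ring sizes being powers of two: the masks
 * passed in (ntxpost/nrxpost) are PDC_RING_ENTRIES - 1 = 511, so the bitwise
 * AND wraps an index around the ring. For example, NTXDACTIVE(510, 2, 511)
 * evaluates to (2 - 510) & 511 = 4 descriptors still active across the wrap.
 */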

/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN 8

/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET 0

/*
 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
 * ring 0
 */
#define PDC_RCVINT_0 (16 + PDC_RINGSET)
#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
#define PDC_INTMASK (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT 1
#define PDC_LAZY_TIMEOUT 10000
#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
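/*
 * Per the macro above, the low 24 bits of the lazy rx interrupt value hold
 * the timeout and bits 31:24 hold the frame count; a frame count of 1 raises
 * an interrupt for every received frame.
 */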
#define PDC_INTMASK_OFFSET 0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET (0x30 + 4 * PDC_RINGSET)
#define FA_RCVLAZY0_OFFSET 0x100

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before frame
 */
#define PDC_SPU2_RESP_HDR_LEN 17
#define PDC_CKSUM_CTRL BIT(27)
#define PDC_CKSUM_CTRL_OFFSET 0x400

#define PDC_SPUM_RESP_HDR_LEN 32

/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL 0x000C0800

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE 0x1

/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL 0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE 0x1

#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
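/*
 * The hardware reports the current rx descriptor in status0 as a byte offset
 * into the ring; pdc_receive() masks it with CRYPTO_D64_RS0_CD_MASK and
 * divides by RING_ENTRY_SIZE to recover a descriptor index.
 */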

/* descriptor flags */
#define D64_CTRL1_EOT BIT(28) /* end of descriptor table */
#define D64_CTRL1_IOC BIT(29) /* interrupt on complete */
#define D64_CTRL1_EOF BIT(30) /* end of frame */
#define D64_CTRL1_SOF BIT(31) /* start of frame */

#define RX_STATUS_OVERFLOW 0x00800000
#define RX_STATUS_LEN 0x0000FFFF
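/*
 * For SPU-M, the first 32-bit word of the response header encodes the frame
 * length in bits 15:0 and an rx overflow flag in bit 23, per the two masks
 * above; see pdc_receive_one().
 */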

#define PDC_TXREGS_OFFSET 0x200
#define PDC_RXREGS_OFFSET 0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX 16384

enum pdc_hw {
	FA_HW,	/* FA2/FA+ hardware (i.e. Northstar Plus) */
	PDC_HW	/* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};

struct pdc_dma_map {
	void *ctx;	/* opaque context associated with frame */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;	/* misc control bits */
	u32 ctrl2;	/* buffer count and address extension */
	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
};

/* dma registers per channel(xmt or rcv) */
struct dma64_regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* descriptor ring base address low 32-bits */
	u32 addrhigh;	/* descriptor ring base address bits 63:32 */
	u32 status0;	/* last rx descriptor written by hw */
	u32 status1;	/* driver does not use */
};

/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line) pad ## line
#define _XSTR(line) _PADLINE(line)
#define PAD _XSTR(__LINE__)
#endif /* PAD */
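/*
 * Each use of PAD below expands to a field named pad<line number>, giving
 * every reserved/padding field in the register structs a unique name.
 */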

/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;	/* dma tx */
	u32 PAD[2];
	struct dma64_regs dmarcv;	/* dma rx */
	u32 PAD[2];
};

/* PDC registers */
struct pdc_regs {
	u32 devcontrol;			/* 0x000 */
	u32 devstatus;			/* 0x004 */
	u32 PAD;
	u32 biststatus;			/* 0x00c */
	u32 PAD[4];
	u32 intstatus;			/* 0x020 */
	u32 intmask;			/* 0x024 */
	u32 gptimer;			/* 0x028 */

	u32 PAD;
	u32 intrcvlazy_0;		/* 0x030 (Only in PDC, not FA2) */
	u32 intrcvlazy_1;		/* 0x034 (Only in PDC, not FA2) */
	u32 intrcvlazy_2;		/* 0x038 (Only in PDC, not FA2) */
	u32 intrcvlazy_3;		/* 0x03c (Only in PDC, not FA2) */

	u32 PAD[48];
	u32 fa_intrecvlazy;		/* 0x100 (Only in FA2, not PDC) */
	u32 flowctlthresh;		/* 0x104 */
	u32 wrrthresh;			/* 0x108 */
	u32 gmac_idle_cnt_thresh;	/* 0x10c */

	u32 PAD[4];
	u32 ifioaccessaddr;		/* 0x120 */
	u32 ifioaccessbyte;		/* 0x124 */
	u32 ifioaccessdata;		/* 0x128 */

	u32 PAD[21];
	u32 phyaccess;			/* 0x180 */
	u32 PAD;
	u32 phycontrol;			/* 0x188 */
	u32 txqctl;			/* 0x18c */
	u32 rxqctl;			/* 0x190 */
	u32 gpioselect;			/* 0x194 */
	u32 gpio_output_en;		/* 0x198 */
	u32 PAD;			/* 0x19c */
	u32 txq_rxq_mem_ctl;		/* 0x1a0 */
	u32 memory_ecc_status;		/* 0x1a4 */
	u32 serdes_ctl;			/* 0x1a8 */
	u32 serdes_status0;		/* 0x1ac */
	u32 serdes_status1;		/* 0x1b0 */
	u32 PAD[11];			/* 0x1b4-1dc */
	u32 clk_ctl_st;			/* 0x1e0 */
	u32 hw_war;			/* 0x1e4 (Only in PDC, not FA2) */
	u32 pwrctl;			/* 0x1e8 */
	u32 PAD[5];

#define PDC_NUM_DMA_RINGS 4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];	/* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};

/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t dmabase;	/* DMA address of start of ring */
	void *vbase;		/* base kernel virtual address of ring */
	u32 size;		/* ring allocation size in bytes */
};

/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           rx ring index.
 * @dst_sg: Scatterlist used to form reply frames beginning at a given ring
 *          index. Retained in order to unmap each sg after reply is processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32 rxin_numd;
	void *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};

/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/* tasklet for deferred processing after DMA rx interrupt */
	struct tasklet_struct rx_tasklet;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;		/* start of PDC registers */

	struct dma64_regs *txregs_64;	/* dma tx engine registers */
	struct dma64_regs *rxregs_64;	/* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd *txd_64;		/* tx descriptor ring */
	struct dma64dd *rxd_64;		/* rx descriptor ring */

	/* descriptor ring sizes */
	u32 ntxd;	/* # tx descriptors */
	u32 nrxd;	/* # rx descriptors */
	u32 nrxpost;	/* # rx buffers to keep posted */
	u32 ntxpost;	/* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32 txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32 tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32 txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32 txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32 rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32 rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32 last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32 rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	/* counters */
	u32 pdc_requests;	/* number of request messages submitted */
	u32 pdc_replies;	/* number of reply messages received */
	u32 last_tx_not_done;	/* too few tx descriptors to indicate done */
	u32 tx_ring_full;	/* unable to accept msg because tx ring full */
	u32 rx_ring_full;	/* unable to accept msg because rx ring full */
	u32 txnobuf;		/* unable to create tx descriptor */
	u32 rxnobuf;		/* unable to create rx descriptor */
	u32 rx_oflow;		/* count of rx overflows */

	/* hardware type - FA2 or PDC/MDE */
	enum pdc_hw hw_type;
};

/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"PDC requests....................%u\n",
				pdcs->pdc_requests);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"PDC responses...................%u\n",
				pdcs->pdc_replies);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Tx not done.....................%u\n",
				pdcs->last_tx_not_done);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Tx ring full....................%u\n",
				pdcs->tx_ring_full);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Rx ring full....................%u\n",
				pdcs->rx_ring_full);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Tx desc write fail. Ring full...%u\n",
				pdcs->txnobuf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Rx desc write fail. Ring full...%u\n",
				pdcs->rxnobuf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Receive overflow................%u\n",
				pdcs->rx_oflow);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Num frags in rx ring............%u\n",
				NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					   pdcs->nrxpost));

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};

/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	debugfs_create_file(spu_stats_name, 0400, debugfs_dir, pdcs,
			    &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs: PDC state for SPU that will generate result
 * @dma_addr: DMA address of buffer that descriptor is being built for
 * @buf_len: Length of the receive buffer, in bytes
 * @flags: Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs: PDC state for the SPU that will process this request
 * @dma_addr: DMA address of packet to be transmitted
 * @buf_len: Length of tx buffer, in bytes
 * @flags: Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}

/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs: PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return: PDC_SUCCESS if one or more receive descriptors were processed
 *         -EAGAIN indicates that no response message is available
 *         -EIO an error occurred
 */
static int
pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;		/* virtual addr of start of resp message DMA header */
	u32 frags_rdy;		/* number of fragments ready to read */
	u32 rx_idx;		/* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rxin_numd[rxin] is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);

	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}

/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs: PDC state
 *
 * Called from the rx tasklet that runs after a receive interrupt.
 * Return: 0
 */
static int
pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);

	return 0;
}

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs: PDC state for the SPU that will process this request
 * @sg: Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs: PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
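	/*
	 * The ptr registers take byte offsets into the ring; each struct
	 * dma64dd is 16 bytes, hence the << 4 to convert descriptor indexes.
	 */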
	iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs: PDC state for SPU handling request
 * @dst_sg: scatterlist providing rx buffers for response to be returned to
 *          mailbox client
 * @ctx: Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs: PDC state for the SPU that will process this request
 * @sg: Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq: Interrupt number that has fired
 * @data: device struct for DMA engine that generated the interrupt
 *
 * We have to clear the device interrupt status flags here. So read and
 * acknowledge the status, disable further interrupts until the rx tasklet has
 * run, and schedule the tasklet to do the deferred processing.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (unlikely(intstatus == 0))
		return IRQ_NONE;

	/* Disable interrupts until soft handler runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Schedule the rx tasklet for deferred processing */
	tasklet_schedule(&pdcs->rx_tasklet);
	return IRQ_HANDLED;
}

/**
 * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
 * a DMA receive interrupt. Reenables the receive interrupt.
 * @t: Pointer to the rx_tasklet embedded in the pdc_state structure
 */
static void pdc_tasklet_cb(struct tasklet_struct *t)
{
	struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);

	pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}

/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs: PDC instance state
 * @ringset: index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  &pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  &pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  &pdcs->rxd_64[i].ctrl1);
		}
	}
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}

/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg: Scatterlist to be DMA'd
 *
 * The count is conservative: an entry whose length is an exact multiple of
 * PDC_DMA_BUF_MAX is counted as needing one more descriptor than it uses.
 *
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}

/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs: PDC state
 * @tx_cnt: The number of descriptors required in the tx ring
 * @rx_cnt: The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
1124static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
1125{
1126 u32 rx_avail;
1127 u32 tx_avail;
1128 bool full = false;
1129
1130 /* Check if the tx and rx rings are likely to have enough space */
1131 rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
1132 pdcs->nrxpost);
1133 if (unlikely(rx_cnt > rx_avail)) {
1134 pdcs->rx_ring_full++;
1135 full = true;
1136 }
1137
1138 if (likely(!full)) {
1139 tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
1140 pdcs->ntxpost);
1141 if (unlikely(tx_cnt > tx_avail)) {
1142 pdcs->tx_ring_full++;
1143 full = true;
1144 }
1145 }
1146 return full;
1147}
1148
1149/**
1150 * pdc_last_tx_done() - If both the tx and rx rings have at least
1151 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
1152 * framework can submit another message.
1153 * @chan: mailbox channel to check
1154 * Return: true if PDC can accept another message on this channel
1155 */
1156static bool pdc_last_tx_done(struct mbox_chan *chan)
1157{
1158 struct pdc_state *pdcs = chan->con_priv;
1159 bool ret;
1160
1161 if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
1162 PDC_RING_SPACE_MIN))) {
1163 pdcs->last_tx_not_done++;
1164 ret = false;
1165 } else {
1166 ret = true;
1167 }
1168 return ret;
1169}
1170
1171/**
1172 * pdc_send_data() - mailbox send_data function
1173 * @chan: The mailbox channel on which the data is sent. The channel
1174 * corresponds to a DMA ringset.
1175 * @data: The mailbox message to be sent. The message must be a
1176 * brcm_message structure.
1177 *
1178 * This function is registered as the send_data function for the mailbox
1179 * controller. From the destination scatterlist in the mailbox message, it
1180 * creates a sequence of receive descriptors in the rx ring. From the source
1181 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
1182 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
1183 * initiate the DMA transfer.
1184 *
1185 * This function does the DMA map and unmap of the src and dst scatterlists in
1186 * the mailbox message.
1187 *
1188 * Return: 0 if successful
1189 * -ENOTSUPP if the mailbox message is a type this driver does not
1190 * support
1191 * < 0 if an error
1192 */
1193static int pdc_send_data(struct mbox_chan *chan, void *data)
1194{
1195 struct pdc_state *pdcs = chan->con_priv;
1196 struct device *dev = &pdcs->pdev->dev;
1197 struct brcm_message *mssg = data;
1198 int err = PDC_SUCCESS;
1199 int src_nent;
1200 int dst_nent;
1201 int nent;
1202 u32 tx_desc_req;
1203 u32 rx_desc_req;
1204
1205 if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
1206 return -ENOTSUPP;
1207
1208 src_nent = sg_nents(mssg->spu.src);
1209 if (likely(src_nent)) {
1210 nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
1211 if (unlikely(nent == 0))
1212 return -EIO;
1213 }
1214
1215 dst_nent = sg_nents(mssg->spu.dst);
1216 if (likely(dst_nent)) {
1217 nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
1218 DMA_FROM_DEVICE);
1219 if (unlikely(nent == 0)) {
1220 dma_unmap_sg(dev, mssg->spu.src, src_nent,
1221 DMA_TO_DEVICE);
1222 return -EIO;
1223 }
1224 }
1225
1226 /*
1227 * Check if the tx and rx rings have enough space. Do this prior to
1228 * writing any tx or rx descriptors. Need to ensure that we do not write
1229 * a partial set of descriptors, or write just rx descriptors but
1230 * corresponding tx descriptors don't fit. Note that we want this check
1231 * and the entire sequence of descriptor to happen without another
1232 * thread getting in. The channel spin lock in the mailbox framework
1233 * ensures this.
1234 */
1235 tx_desc_req = pdc_desc_count(mssg->spu.src);
1236 rx_desc_req = pdc_desc_count(mssg->spu.dst);
1237 if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
1238 return -ENOSPC;
1239
	/* Create rx descriptors to catch the SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */
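	/*
	 * The error codes are OR'ed together, so a nonzero err means at least
	 * one step failed; the combined value is not itself a usable errno.
	 */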

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}

/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs: state of the PDC
 */
static
void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure: %p",
		pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA but will enable later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs: PDC state structure
 *
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}

/**
 * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
 * the metadata header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return: PDC_SUCCESS
 *         -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}

/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC.
 * Interrupt handling is deferred to a tasklet so that response messages are
 * processed outside of interrupt context.
 * @pdcs: PDC state
 *
 * Set the interrupt mask for transmit and receive done.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return: PDC_SUCCESS
 *         <0 if the IRQ request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	if (pdcs->hw_type == FA_HW)
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  FA_RCVLAZY0_OFFSET);
	else
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};

/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs: PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
 * driver only uses one ringset and thus one mb channel. The mailbox framework
 * learns that a message has been transmitted by polling (txdone_poll is set)
 * through the last_tx_done op, rather than from a transmit-done interrupt.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = devm_mbox_controller_register(dev, mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

/* Device tree API */
static const int pdc_hw = PDC_HW;
static const int fa_hw = FA_HW;

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
	{.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);

/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev: Platform device
 * @pdcs: PDC state
 *
 * Reads the number of bytes of receive status that precede each received
 * frame. Reads whether transmitted and received frames should be preceded by
 * an 8-byte BCM header.
 *
 * Return: 0 if successful
 *         -ENODEV if device not available
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	const int *hw_type;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0)
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	pdcs->hw_type = PDC_HW;

	hw_type = device_get_match_data(dev);
	if (hw_type)
		pdcs->hw_type = *hw_type;

	return 0;
}

/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev: PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for the tx and rx descriptor rings */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init tasklet for deferred DMA rx processing */
	tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	tasklet_kill(&pdcs->rx_tasklet);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static void pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	tasklet_kill(&pdcs->rx_tasklet);

	pdc_hw_disable(pdcs);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
}

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove_new = pdc_remove,
	.driver = {
		.name = "brcm-iproc-pdc-mbox",
		.of_match_table = pdc_mbox_of_match,
	},
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");
1/*
2 * Copyright 2016 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2, as
6 * published by the Free Software Foundation (the "GPL").
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License version 2 (GPLv2) for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * version 2 (GPLv2) along with this source code.
15 */
16
17/*
18 * Broadcom PDC Mailbox Driver
19 * The PDC provides a ring based programming interface to one or more hardware
20 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
21 * cryptographic offload hardware. In some chips the PDC is referred to as MDE.
22 *
23 * The PDC driver registers with the Linux mailbox framework as a mailbox
24 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
25 * a mailbox channel. The PDC driver uses interrupts to determine when data
26 * transfers to and from an offload engine are complete. The PDC driver uses
27 * threaded IRQs so that response messages are handled outside of interrupt
28 * context.
29 *
30 * The PDC driver allows multiple messages to be pending in the descriptor
31 * rings. The tx_msg_start descriptor index indicates where the last message
32 * starts. The txin_numd value at this index indicates how many descriptor
33 * indexes make up the message. Similar state is kept on the receive side. When
34 * an rx interrupt indicates a response is ready, the PDC driver processes numd
35 * descriptors from the tx and rx ring, thus processing one response at a time.
36 */
37
38#include <linux/errno.h>
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/debugfs.h>
43#include <linux/interrupt.h>
44#include <linux/wait.h>
45#include <linux/platform_device.h>
46#include <linux/io.h>
47#include <linux/of.h>
48#include <linux/of_device.h>
49#include <linux/of_address.h>
50#include <linux/of_irq.h>
51#include <linux/mailbox_controller.h>
52#include <linux/mailbox/brcm-message.h>
53#include <linux/scatterlist.h>
54#include <linux/dma-direction.h>
55#include <linux/dma-mapping.h>
56#include <linux/dmapool.h>
57
58#define PDC_SUCCESS 0
59
60#define RING_ENTRY_SIZE sizeof(struct dma64dd)
61
62/* # entries in PDC dma ring */
63#define PDC_RING_ENTRIES 512
64/*
65 * Minimum number of ring descriptor entries that must be free to tell mailbox
66 * framework that it can submit another request
67 */
68#define PDC_RING_SPACE_MIN 15
69
70#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
71/* Rings are 8k aligned */
72#define RING_ALIGN_ORDER 13
73#define RING_ALIGN BIT(RING_ALIGN_ORDER)
74
75#define RX_BUF_ALIGN_ORDER 5
76#define RX_BUF_ALIGN BIT(RX_BUF_ALIGN_ORDER)
77
78/* descriptor bumping macros */
79#define XXD(x, max_mask) ((x) & (max_mask))
80#define TXD(x, max_mask) XXD((x), (max_mask))
81#define RXD(x, max_mask) XXD((x), (max_mask))
82#define NEXTTXD(i, max_mask) TXD((i) + 1, (max_mask))
83#define PREVTXD(i, max_mask) TXD((i) - 1, (max_mask))
84#define NEXTRXD(i, max_mask) RXD((i) + 1, (max_mask))
85#define PREVRXD(i, max_mask) RXD((i) - 1, (max_mask))
86#define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask))
87#define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask))
88
89/* Length of BCM header at start of SPU msg, in bytes */
90#define BCM_HDR_LEN 8
91
92/*
93 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
94 * not currently support use of multiple ringsets on a single PDC engine.
95 */
96#define PDC_RINGSET 0
97
98/*
99 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
100 * ring 0
101 */
102#define PDC_RCVINT_0 (16 + PDC_RINGSET)
103#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
104#define PDC_INTMASK (PDC_RCVINTEN_0)
105#define PDC_LAZY_FRAMECOUNT 1
106#define PDC_LAZY_TIMEOUT 10000
107#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
#define PDC_INTMASK_OFFSET 0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET (0x30 + 4 * PDC_RINGSET)

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before frame
 */
#define PDC_SPU2_RESP_HDR_LEN 17
#define PDC_CKSUM_CTRL BIT(27)
#define PDC_CKSUM_CTRL_OFFSET 0x400

#define PDC_SPUM_RESP_HDR_LEN 32

/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL 0x000C0800

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE 0x1

/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL 0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE 0x1

#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)

/* descriptor flags */
#define D64_CTRL1_EOT BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF BIT(30)	/* end of frame */
#define D64_CTRL1_SOF BIT(31)	/* start of frame */

#define RX_STATUS_OVERFLOW 0x00800000
#define RX_STATUS_LEN 0x0000FFFF

#define PDC_TXREGS_OFFSET 0x200
#define PDC_RXREGS_OFFSET 0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX 16384

struct pdc_dma_map {
	void *ctx;	/* opaque context associated with frame */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;	/* misc control bits */
	u32 ctrl2;	/* buffer count and address extension */
	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
};
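
/*
 * Each descriptor is 16 bytes, so RING_ENTRY_SIZE is 16 and descriptor
 * index N lives at byte offset N * 16 (N << 4) within the ring. The hw
 * ptr and status registers below deal in these byte offsets.
 */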

/* dma registers per channel (xmt or rcv) */
struct dma64_regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* descriptor ring base address low 32-bits */
	u32 addrhigh;	/* descriptor ring base address bits 63:32 */
	u32 status0;	/* last rx descriptor written by hw */
	u32 status1;	/* driver does not use */
};

/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line) pad ## line
#define _XSTR(line) _PADLINE(line)
#define PAD _XSTR(__LINE__)
#endif /* PAD */
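
/*
 * For example, a "u32 PAD;" member declared on source line N expands to
 * "u32 padN;". The extra _XSTR() indirection forces __LINE__ to expand
 * before token pasting, so each padding word gets a unique name.
 */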

/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;	/* dma tx */
	u32 PAD[2];
	struct dma64_regs dmarcv;	/* dma rx */
	u32 PAD[2];
};

/* PDC registers */
struct pdc_regs {
	u32 devcontrol;			/* 0x000 */
	u32 devstatus;			/* 0x004 */
	u32 PAD;
	u32 biststatus;			/* 0x00c */
	u32 PAD[4];
	u32 intstatus;			/* 0x020 */
	u32 intmask;			/* 0x024 */
	u32 gptimer;			/* 0x028 */

	u32 PAD;
	u32 intrcvlazy_0;		/* 0x030 */
	u32 intrcvlazy_1;		/* 0x034 */
	u32 intrcvlazy_2;		/* 0x038 */
	u32 intrcvlazy_3;		/* 0x03c */

	u32 PAD[48];
	u32 removed_intrecvlazy;	/* 0x100 */
	u32 flowctlthresh;		/* 0x104 */
	u32 wrrthresh;			/* 0x108 */
	u32 gmac_idle_cnt_thresh;	/* 0x10c */

	u32 PAD[4];
	u32 ifioaccessaddr;		/* 0x120 */
	u32 ifioaccessbyte;		/* 0x124 */
	u32 ifioaccessdata;		/* 0x128 */

	u32 PAD[21];
	u32 phyaccess;			/* 0x180 */
	u32 PAD;
	u32 phycontrol;			/* 0x188 */
	u32 txqctl;			/* 0x18c */
	u32 rxqctl;			/* 0x190 */
	u32 gpioselect;			/* 0x194 */
	u32 gpio_output_en;		/* 0x198 */
	u32 PAD;			/* 0x19c */
	u32 txq_rxq_mem_ctl;		/* 0x1a0 */
	u32 memory_ecc_status;		/* 0x1a4 */
	u32 serdes_ctl;			/* 0x1a8 */
	u32 serdes_status0;		/* 0x1ac */
	u32 serdes_status1;		/* 0x1b0 */
	u32 PAD[11];			/* 0x1b4-1dc */
	u32 clk_ctl_st;			/* 0x1e0 */
	u32 hw_war;			/* 0x1e4 */
	u32 pwrctl;			/* 0x1e8 */
	u32 PAD[5];

#define PDC_NUM_DMA_RINGS 4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];	/* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};

/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t dmabase;	/* DMA address of start of ring */
	void *vbase;		/* base kernel virtual address of ring */
	u32 size;		/* ring allocation size in bytes */
};

/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           rx ring index.
 * @dst_sg: Scatterlist used to form reply frames beginning at a given ring
 *          index. Retained in order to unmap each sg after reply is processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32 rxin_numd;
	void *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};

/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/* tasklet for deferred processing after DMA rx interrupt */
	struct tasklet_struct rx_tasklet;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;		/* start of PDC registers */

	struct dma64_regs *txregs_64;	/* dma tx engine registers */
	struct dma64_regs *rxregs_64;	/* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd *txd_64;		/* tx descriptor ring */
	struct dma64dd *rxd_64;		/* rx descriptor ring */

	/* descriptor ring sizes */
	u32 ntxd;	/* # tx descriptors */
	u32 nrxd;	/* # rx descriptors */
	u32 nrxpost;	/* # rx buffers to keep posted */
	u32 ntxpost;	/* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32 txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32 tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32 txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32 txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32 rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32 rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32 last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32 rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	struct dentry *debugfs_stats;	/* debug FS stats file for this PDC */

	/* counters */
	u32 pdc_requests;	/* number of request messages submitted */
	u32 pdc_replies;	/* number of reply messages received */
	u32 last_tx_not_done;	/* too few tx descriptors to indicate done */
	u32 tx_ring_full;	/* unable to accept msg because tx ring full */
	u32 rx_ring_full;	/* unable to accept msg because rx ring full */
	u32 txnobuf;		/* unable to create tx descriptor */
	u32 rxnobuf;		/* unable to create rx descriptor */
	u32 rx_oflow;		/* count of rx overflows */
};
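
/*
 * Example of how the ring indexes evolve for one request: if a request's
 * source scatterlist needs three tx descriptors and txout is 10 when it
 * arrives, then tx_msg_start is set to 10, txin_numd[10] becomes 3, and
 * txout advances to 13. When the matching response completes, txin (10)
 * advances by txin_numd[10] to 13, freeing those descriptors for reuse.
 */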

/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC requests....................%u\n",
			       pdcs->pdc_requests);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC responses...................%u\n",
			       pdcs->pdc_replies);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx not done.....................%u\n",
			       pdcs->last_tx_not_done);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx ring full....................%u\n",
			       pdcs->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx ring full....................%u\n",
			       pdcs->rx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx desc write fail. Ring full...%u\n",
			       pdcs->txnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx desc write fail. Ring full...%u\n",
			       pdcs->rxnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Receive overflow................%u\n",
			       pdcs->rx_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Num frags in rx ring............%u\n",
			       NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					  pdcs->nrxpost));

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};

/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
						  debugfs_dir, pdcs,
						  &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:      PDC state for the SPU that will process this request
 * @dma_addr:  DMA address of packet to be transmitted
 * @buf_len:   Length of tx buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}

/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs:  PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return:  PDC_SUCCESS if one or more receive descriptors was processed
 *          -EAGAIN indicates that no response message is available
 *          -EIO an error occurred
 */
static int
pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;	/* virtual addr of start of resp message DMA header */
	u32 frags_rdy;	/* number of fragments ready to read */
	u32 rx_idx;	/* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rx_ctx[rxin].rxin_numd is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);

	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}

/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs:  PDC state
 *
 * Called from the rx tasklet, outside of hard interrupt context.
 *
 * Return: 0
 */
static int
pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

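	/*
	 * The hw reports its current rx descriptor as a byte offset into the
	 * 8 KB ring; masking with CRYPTO_D64_RS0_CD_MASK and dividing by the
	 * 16-byte entry size yields a descriptor index (e.g. offset 0x150 is
	 * descriptor 0x150 / 16 = 21).
	 */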
	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32(&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);

	return 0;
}

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:  PDC state for the SPU that will process this request
 * @sg:    Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}
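
/*
 * For instance, a single 40000-byte scatterlist entry is split by the loop
 * above into three tx descriptors of 16384 + 16384 + 7232 bytes, with SOF
 * on the first descriptor (when it begins a new message) and EOF/IOC only
 * on the final descriptor of the frame.
 */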

/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
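	/*
	 * The ptr registers take byte offsets; each descriptor is 16 bytes,
	 * so the next-to-use index is shifted left by 4 to form the offset.
	 */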
	iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:    PDC state for SPU handling request
 * @dst_sg:  scatterlist providing rx buffers for response to be returned to
 *           mailbox client
 * @ctx:     Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return:  PDC_SUCCESS if successful
 *          < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:  PDC state for the SPU whose receive ring gets the buffers
 * @sg:    Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:   Interrupt number that has fired
 * @data:  device struct for DMA engine that generated the interrupt
 *
 * We have to clear the device interrupt status flags here. Then disable
 * further interrupts and schedule the rx tasklet, which processes the
 * response messages outside of interrupt context.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (unlikely(intstatus == 0))
		return IRQ_NONE;

	/* Disable interrupts until soft handler runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Wake up the rx tasklet */
	tasklet_schedule(&pdcs->rx_tasklet);
	return IRQ_HANDLED;
}

/**
 * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
 * a DMA receive interrupt. Reenables the receive interrupt.
 * @data: PDC state structure
 */
static void pdc_tasklet_cb(unsigned long data)
{
	struct pdc_state *pdcs = (struct pdc_state *)data;

	pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}

/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:     PDC instance state
 * @ringset:  index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  &pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  &pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  &pdcs->rxd_64[i].ctrl1);
		}
	}
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}

/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg:  Scatterlist to be DMA'd
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}
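
/*
 * Note the count is deliberately conservative: an entry of exactly
 * PDC_DMA_BUF_MAX (16384) bytes only needs one descriptor but is counted
 * as two, since 16384 / 16384 + 1 = 2. This can only over-reserve space.
 */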

/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs:    PDC state
 * @tx_cnt:  The number of descriptors required in the tx ring
 * @rx_cnt:  The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
	u32 rx_avail;
	u32 tx_avail;
	bool full = false;

	/* Check if the tx and rx rings are likely to have enough space */
	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_cnt > rx_avail)) {
		pdcs->rx_ring_full++;
		full = true;
	}

	if (likely(!full)) {
		tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
						      pdcs->ntxpost);
		if (unlikely(tx_cnt > tx_avail)) {
			pdcs->tx_ring_full++;
			full = true;
		}
	}
	return full;
}

/**
 * pdc_last_tx_done() - If both the tx and rx rings have at least
 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
 * framework can submit another message.
 * @chan:  mailbox channel to check
 * Return: true if PDC can accept another message on this channel
 */
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;
	bool ret;

	if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
				    PDC_RING_SPACE_MIN))) {
		pdcs->last_tx_not_done++;
		ret = false;
	} else {
		ret = true;
	}
	return ret;
}

/**
 * pdc_send_data() - mailbox send_data function
 * @chan:  The mailbox channel on which the data is sent. The channel
 *         corresponds to a DMA ringset.
 * @data:  The mailbox message to be sent. The message must be a
 *         brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map of the src and dst scatterlists in the
 * mailbox message; they are unmapped when the response is processed.
 *
 * Return: 0 if successful
 *         -ENOTSUPP if the mailbox message is a type this driver does not
 *         support
 *         < 0 if an error
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor writes to happen without
	 * another thread getting in. The channel spin lock in the mailbox
	 * framework ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
		return -ENOSPC;

	/* Create rx descriptors to catch the SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}
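
/*
 * Minimal usage sketch (illustrative only, not part of this driver): how a
 * mailbox client such as a SPU crypto driver might hand a request to this
 * controller. The function and variable names here are hypothetical; only
 * the brcm_message layout and the mailbox call are real APIs. Assumes the
 * client registered its mbox_client with tx_block = true, so the message
 * may live on the stack until the framework finishes submitting it.
 */
#if 0	/* example only, not compiled */
static int example_submit_spu_request(struct mbox_chan *chan,
				      struct scatterlist *src,
				      struct scatterlist *dst,
				      void *request_ctx)
{
	struct brcm_message mssg;

	memset(&mssg, 0, sizeof(mssg));
	mssg.type = BRCM_MESSAGE_SPU;	/* only type pdc_send_data() accepts */
	mssg.spu.src = src;	/* request frame; pdc_send_data() DMA-maps it */
	mssg.spu.dst = dst;	/* buffers that will catch the SPU response */
	mssg.ctx = request_ctx;	/* echoed back via mbox_chan_received_data() */

	/* Queues the message; the framework calls pdc_send_data() above */
	return mbox_send_message(chan, &mssg);
}
#endif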

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}

/**
 * pdc_hw_init() - Initialize the hardware and driver state for one PDC.
 * @pdcs:  state of the PDC
 */
static
void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure: %p", pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA; the channels are enabled later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs:  PDC state structure
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}

/**
 * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
 * the metadata header returned with each response message.
 * @pdcs:  PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return: PDC_SUCCESS
 *         -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}
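
/*
 * For example, with a 32-byte receive status region and brcm,use-bcm-hdr
 * set in the device tree, each pool buffer is 32 + 8 = 40 bytes, allocated
 * at RX_BUF_ALIGN (32-byte) alignment.
 */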

/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC.
 * Deferred handling of interrupts happens in the rx tasklet, outside of
 * interrupt context.
 * @pdcs:  PDC state
 *
 * Set the interrupt mask to enable the ring 0 receive interrupt.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return: PDC_SUCCESS
 *         <0 if the IRQ request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
	iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};

/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs:  PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
 * driver only uses one ringset and thus one mb channel. The PDC determines
 * when a mailbox message has been fully transmitted by polling (txdone_poll)
 * through the pdc_last_tx_done() callback, not from a tx-done interrupt.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = mbox_controller_register(mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev:  Platform device
 * @pdcs:  PDC state
 *
 * Reads the number of bytes of receive status that precede each received
 * frame. Reads whether transmit and received frames should be preceded by an
 * 8-byte BCM header.
 *
 * Return: 0 if successful
 *         < 0 if a required device tree property cannot be read
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0) {
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);
		return err;
	}

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	return 0;
}

/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev:  PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for tx and rx descriptor rings */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pdc_regs) {
		err = -ENODEV;
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
		goto cleanup_ring_pool;
	}

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init tasklet for deferred DMA rx processing */
	tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdcs->debugfs_stats = NULL;
	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	tasklet_kill(&pdcs->rx_tasklet);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static int pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	tasklet_kill(&pdcs->rx_tasklet);

	pdc_hw_disable(pdcs);

	mbox_controller_unregister(&pdcs->mbc);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
	return 0;
}

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = of_match_ptr(pdc_mbox_of_match),
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");