// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload from software
 * the effort of servicing DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors
 * (BD), software links data buffers and triggers the QMU to send / receive
 * data to / from the host in one shot.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */

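/*
 * Editorial sketch (not from the original source): the expected call
 * flow through this file, using only functions defined below.
 *
 *	mtu3_qmu_init(mtu);		// create the GPD dma pool
 *	mtu3_gpd_ring_alloc(mep);	// allocate a per-ep GPD ring
 *	mtu3_insert_gpd(mep, mreq);	// prepare a TX or RX GPD
 *	mtu3_qmu_start(mep);		// program and kick the queue
 *	mtu3_qmu_isr(mtu);		// completions via qmu_done_tx/rx()
 *	mtu3_qmu_stop(mep);
 *	mtu3_gpd_ring_free(mep);
 *	mtu3_qmu_exit(mtu);		// destroy the pool
 */
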
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#include "mtu3_trace.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_ZLP	BIT(6)
#define GPD_FLAGS_IOC	BIT(7)
#define GET_GPD_HWO(gpd)	(le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)

#define GPD_RX_BUF_LEN_OG(x)	(((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x)	(((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
})

#define GPD_DATA_LEN_OG(x)	((x) & 0xffff)
#define GPD_DATA_LEN_EL(x)	((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})

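/*
 * Editorial note: the _OG ("original") layouts encode a 16-bit length,
 * while the _EL layouts (selected when mtu->gen2cp is set) encode a
 * 20-bit length at a different bit offset, e.g. GPD_DATA_LEN_OG caps a
 * single GPD at 0xffff bytes whereas GPD_DATA_LEN_EL allows up to 0xfffff.
 */
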
#define GPD_EXT_FLAG_ZLP	BIT(29)
#define GPD_EXT_NGP_OG(x)	(((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x)	(((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x)	(((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x)	(((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
})

#define GPD_EXT_BUF(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
})

#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))

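/*
 * Editorial note: queue addresses wider than 32 bits are split across
 * two registers; the helpers below read the current GPD pointer from
 * the CPR register (low 32 bits) plus the HIAR register (high bits),
 * and write the start address as lower_32_bits()/upper_32_bits() pairs.
 */
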
static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}

static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
		gpd_ring_init(ring, gpd);
	}
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all GPDs by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool,
			ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}

void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

/* check if a ring has no free GPD to enqueue (i.e. it is full) */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}

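/*
 * Editorial example: with MAX_GPD_NUM slots, at most MAX_GPD_NUM - 1
 * requests can be outstanding, since the prepare path always keeps one
 * spare GPD (HWO cleared) at the enqueue position as a stopper; once
 * advancing enqueue would make it meet dequeue, there is no room left.
 */
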
int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}

static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw0_info = cpu_to_le32(ext_addr);

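	/*
	 * Editorial note: per the macros above, the ZLP request flag
	 * lives in dw0_info (GPD_FLAGS_ZLP) on gen2cp IP, but in
	 * dw3_info (GPD_EXT_FLAG_ZLP) on the original IP.
	 */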
	if (req->zero) {
		if (mtu->gen2cp)
			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
		else
			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
	}

	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw3_info = cpu_to_le32(ext_addr);
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}

int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		/* send zero-length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
				QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to next GPD when a ZLP is received */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
				QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}

/* may be called in an atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
			!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop QMU */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}

/*
 * The QMU can't transfer a zero-length packet directly (a hardware
 * limitation on old SoCs), so when a ZLP needs to be sent, we
 * intentionally trigger a length-error interrupt and let the ISR send
 * the ZLP via the BMU instead.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length != 0)
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
	trace_mtu3_zlp_exp_gpd(mep, gpd_current);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty failed\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

	/* bypass the current GPD */
	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}

/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet(meanwhile, the second request is transferred,
 * and the tasklet completes both of them) --> qmu_interrupt for the second
 * request.
 * To avoid the case above, qmu_done_tx() is called directly from the ISR.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/*
	 * translate the physical address read from the QMU register
	 * into a virtual address
	 */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}

static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}

irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read-update */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);
	trace_mtu3_qmu_isr(qmu_done_status, qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}

int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}