// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

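/*
 * Queue register accessors. When a queue is offloaded to Wireless Ethernet
 * Dispatch (WED), its ring registers live behind the WED device and must be
 * accessed through the mtk_wed helpers; otherwise plain readl()/writel() on
 * the queue's register block is used.
 */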
#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read((_q)->wed,		\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_q, _field, _val) do {					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write((_q)->wed,			\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_q, _field) readl(&(_q)->regs->_field)
#define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field)

#endif

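/*
 * TX descriptor (txwi) and RX token (rxwi) bookkeeping. txwi buffers are
 * allocated together with their cache entry, DMA-mapped once and recycled
 * through dev->txwi_cache; rxwi entries only track page pool buffers handed
 * to the WED RX hardware and are recycled through dev->rxwi_cache.
 */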
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

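/*
 * Reprogram the ring base/size registers and resync the software head/tail
 * pointers with the hardware DMA index. Used after a queue reset or flush.
 */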
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(q, desc_base, q->desc_dma);
	if (q->flags & MT_QFLAG_WED_RRO_EN)
		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
	else
		Q_WRITE(q, ring_size, q->ndesc);
	q->head = Q_READ(q, dma_idx);
	q->tail = q->head;
}

static void
__mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
		       bool reset_idx)
{
	if (!q || !q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		/* clear descriptors */
		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	if (reset_idx) {
		Q_WRITE(q, cpu_idx, 0);
		Q_WRITE(q, dma_idx, 0);
	}
	mt76_dma_sync_idx(dev, q);
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	__mt76_dma_queue_reset(dev, q, true);
}

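/*
 * Queue a single RX buffer at the current head slot. For WED RRO indication
 * rings the descriptor memory itself is handed out as the buffer; for WED RX
 * rings the buffer is additionally registered in the RX token table so the
 * WED hardware can reference it. Returns the descriptor index or a negative
 * errno.
 */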
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	struct mt76_desc *desc;
	int idx = q->head;
	u32 buf1 = 0, ctrl;
	int rx_token;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		data = &rro_desc[q->head];
		goto done;
	}

	desc = &q->desc[q->head];
	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

done:
	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

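/*
 * Fill TX descriptors for a frame made up of up to two DMA segments per
 * descriptor. The last queue entry records the skb and optional txwi so the
 * cleanup path can complete the frame. Returns the index of the last
 * descriptor written.
 */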
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
#endif
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
					   buf[1].addr >> 32);
#endif
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

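/*
 * Unmap the DMA buffers of a completed TX descriptor and hand a copy of the
 * queue entry back to the caller before the slot is cleared for reuse.
 */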
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(q, cpu_idx, q->head);
}

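/*
 * Reap completed TX descriptors up to the hardware DMA index (or all of them
 * when flushing), complete the associated skbs and recycle txwi buffers. A
 * flush also resyncs and kicks the ring; waiters on tx_wait are woken once
 * the queue drains.
 */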
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

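/*
 * Retrieve the buffer behind a completed RX descriptor. For WED RX rings the
 * buffer is looked up through its RX token and the matching rxwi entry is
 * recycled; regular ring buffers are synced for CPU access before being
 * returned. RRO indication rings simply hand back the descriptor slot.
 */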
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	u32 ctrl, desc_info, buf1;
	void *buf = e->buf;

	if (mt76_queue_is_wed_rro_ind(q))
		goto done;

	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
	if (len) {
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	desc_info = le32_to_cpu(desc->info);
	if (info)
		*info = desc_info;

	buf1 = le32_to_cpu(desc->buf1);
	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
		if (drop)
			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
	} else {
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
	}

done:
	e->buf = NULL;
	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (mt76_queue_is_wed_rro_data(q))
		return NULL;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		if (flush)
			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
			return NULL;
	}

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

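/*
 * Transmit an skb that does not need a separate txwi (typically MCU
 * messages): map it, post a single-buffer descriptor and kick the ring.
 * The skb is consumed on error.
 */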
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

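/*
 * Full data-path TX: grab a txwi, map the skb head and all fragments, let
 * the driver fill in the hardware descriptor via tx_prepare_skb() and then
 * chain the buffers onto the ring. On failure everything is unmapped and the
 * frame is completed back to mac80211 with an empty status.
 */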
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &dev->phy.state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

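/*
 * Refill an RX ring with page pool buffers until it is full. RRO indication
 * rings do not use external buffers, so only the descriptor slots are
 * accounted for them.
 */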
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		 bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf = {};
		enum dma_data_direction dir;
		dma_addr_t addr;
		int offset;
		void *buf = NULL;

		if (mt76_queue_is_wed_rro_ind(q))
			goto done;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		qbuf.addr = addr + q->buf_offset;
done:
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames || mt76_queue_is_wed_rx(q))
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

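/*
 * Hook a queue up to the Wireless Ethernet Dispatch block. Depending on the
 * queue type this registers the ring with the matching WED TX, TXFREE, RX or
 * RRO setup helper and records the WED-side register base. Queues without an
 * active WED device fall back to normal operation.
 */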
int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	int ret = 0, type, ring;
	u16 flags;

	if (!q || !q->ndesc)
		return -EINVAL;

	flags = q->flags;
	if (!q->wed || !mtk_wed_device_active(q->wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
						   reset);
		if (!ret)
			q->wed_regs = q->wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);

		ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
		if (!ret)
			q->wed_regs = q->wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
						   reset);
		if (!ret)
			q->wed_regs = q->wed->rx_ring[ring].reg_base;
		break;
	case MT76_WED_RRO_Q_DATA:
		q->flags &= ~MT_QFLAG_WED;
		__mt76_dma_queue_reset(dev, q, false);
		mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
		q->head = q->ndesc - 1;
		q->queued = q->head;
		break;
	case MT76_WED_RRO_Q_MSDU_PG:
		q->flags &= ~MT_QFLAG_WED;
		__mt76_dma_queue_reset(dev, q, false);
		mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
		q->head = q->ndesc - 1;
		q->queued = q->head;
		break;
	case MT76_WED_RRO_Q_IND:
		q->flags &= ~MT_QFLAG_WED;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);
		mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	q->flags = flags;

	return ret;
#else
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);

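/*
 * Allocate descriptor and entry arrays for a queue, create its page pool,
 * run the optional WED setup and reset the ring. RRO indication rings use
 * the larger mt76_wed_rro_desc layout and have their magic counters
 * pre-initialized.
 */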
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
					    : sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;
		int i;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		for (i = 0; i < q->ndesc; i++) {
			struct mt76_wed_rro_ind *cmd;

			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
			cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
		}
	}

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_dma_wed_setup(dev, q, false);
	if (ret)
		return ret;

	if (mtk_wed_device_active(&dev->mmio.wed)) {
		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
		    mt76_queue_is_wed_tx_free(q))
			return 0;
	}

	mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	do {
		spin_lock_bh(&q->lock);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		spin_unlock_bh(&q->lock);

		if (!buf)
			break;

		if (!mt76_queue_is_wed_rro(q))
			mt76_put_page_pool_buf(buf, false);
	} while (1);

	spin_lock_bh(&q->lock);
	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	if (!q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_dma_wed_setup(dev, q, true);

	if (mt76_queue_is_wed_tx_free(q))
		return;

	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    mt76_queue_is_wed_rro(q))
		return;

	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q, false);
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info, bool allow_direct)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, allow_direct);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

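/*
 * NAPI RX worker: pull completed buffers off the ring, reassemble
 * multi-buffer frames via q->rx_head, build skbs and pass them to the
 * driver's rx_skb() handler. TXFREE rings offloaded to WED are polled
 * against the hardware DMA index instead of the DMA_DONE bit.
 */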
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool allow_direct = !mt76_queue_is_wed_rx(q);
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    mt76_queue_is_wed_tx_free(q)) {
		dma_idx = Q_READ(q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info,
					  allow_direct);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, allow_direct);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

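/*
 * Register the per-RX-queue NAPI contexts on a dummy netdev, pre-fill the
 * RX rings and enable polling; exposed to drivers as the .init queue op.
 */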
static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_wed_reset(struct mt76_dev *dev)
{
	struct mt76_mmio *mmio = &dev->mmio;

	if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
		return;

	complete(&mmio->wed_reset);

	if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
		dev_err(dev->dev, "wed reset complete timeout\n");
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_reset);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    mt76_queue_is_wed_rro(q))
			continue;

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);

	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
		mtk_wed_device_detach(&dev->mmio.wed_hif2);

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);