/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#define DMA_DUMMY_TXWI	((void *) ~0)

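/*
 * Allocate the descriptor ring and per-descriptor bookkeeping for a queue,
 * mark every descriptor as done so the hardware does not process stale
 * entries, and program the ring base, size and index registers.
 */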
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int size;
	int i;

	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->swq);

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	iowrite32(q->desc_dma, &q->regs->desc_base);
	iowrite32(0, &q->regs->cpu_idx);
	iowrite32(0, &q->regs->dma_idx);
	iowrite32(q->ndesc, &q->regs->ring_size);

	return 0;
}

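/*
 * Fill ring descriptors starting at q->head, packing up to two buffers
 * per descriptor. When a txwi is passed in, a dummy txwi marker is first
 * written to the head entry; the real skb and txwi pointers are attached
 * to the entry of the last descriptor used, whose index is returned.
 */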
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi)
		q->entry[q->head].txwi = DMA_DUMMY_TXWI;

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}

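/*
 * Unmap the DMA buffers of a completed TX descriptor and hand the old
 * queue entry back to the caller via prev_e before clearing it.
 */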
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->txwi || !e->skb) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_TXWI)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

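/* Resynchronize the software head/tail with the hardware DMA index. */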
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	q->head = ioread32(&q->regs->dma_idx);
	q->tail = q->head;
	iowrite32(q->head, &q->regs->cpu_idx);
}

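/*
 * Reap completed TX descriptors up to the hardware DMA index (or all
 * queued descriptors when flushing), complete their skbs, return txwi
 * blocks, and wake the corresponding mac80211 queue once enough ring
 * space is available again.
 */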
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = &dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	if (flush)
		last = -1;
	else
		last = ioread32(&q->regs->dma_idx);

	while (q->queued && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			q->swq_queued--;

		if (entry.skb)
			dev->drv->tx_complete_skb(dev, q, &entry, flush);

		if (entry.txwi) {
			mt76_put_txwi(dev, entry.txwi);
			wake = true;
		}

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		if (!flush && q->tail == last)
			last = ioread32(&q->regs->dma_idx);
	}

	if (!flush)
		mt76_txq_schedule(dev, q);
	else
		mt76_dma_sync_idx(dev, q);

	wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

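/*
 * Detach the RX buffer from a descriptor: report its length, info word
 * and whether more fragments follow, then unmap it for CPU access.
 */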
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

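/*
 * Pop the buffer at the ring tail. Unless flushing, only descriptors the
 * hardware has marked as done are dequeued.
 */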
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

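/* Notify the hardware of newly queued descriptors. */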
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	iowrite32(q->head, &q->regs->cpu_idx);
}

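/*
 * Refill an RX ring with freshly allocated page fragments until it is
 * nearly full, using the NAPI fragment allocator when requested, and
 * kick the queue if at least one buffer was added.
 */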
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	int idx;
	void *(*alloc)(unsigned int fragsz);

	if (napi)
		alloc = napi_alloc_frag;
	else
		alloc = netdev_alloc_frag;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = alloc(q->buf_size);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev->dev, addr)) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

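/* Drop all buffers still pending on an RX queue. */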
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);
}

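/*
 * Reset an RX queue: clear the DMA-done bit of every descriptor, free
 * the attached buffers, resync the hardware indices and refill the ring.
 */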
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q, false);
}

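/*
 * Append a buffer to the RX skb currently being assembled in q->rx_head;
 * once the last fragment arrives, pass the completed skb to the driver.
 */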
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;

	offset += q->buf_offset;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			q->buf_size);

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}

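/*
 * Main RX loop: dequeue completed buffers up to the given budget, build
 * skbs (handling multi-fragment frames via q->rx_head), hand them to the
 * driver and refill the ring.
 */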
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	struct sk_buff *skb;
	unsigned char *data;
	int len;
	int done = 0;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		skb_reserve(skb, q->buf_offset);
		if (skb->tail + len > skb->end) {
			dev_kfree_skb(skb);
			continue;
		}

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *) skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

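/*
 * NAPI poll handler for one RX queue: process received frames in batches
 * and complete NAPI once less than the full budget was consumed.
 */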
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget) {
		napi_complete(napi);
		dev->drv->rx_poll_complete(dev, qid);
	}

	return done;
}

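/*
 * Register a NAPI context for each RX queue on a dummy netdev, fill the
 * rings, initialize the RX skb queues and enable polling.
 */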
static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		skb_queue_head_init(&dev->rx_skb[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

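/* DMA ring based implementation of the generic mt76 queue operations. */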
static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.add_buf = mt76_dma_add_buf,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

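/* Install the DMA queue operations on a device. */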
int mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

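/*
 * Flush all TX queues, then delete the RX NAPI contexts and free any
 * remaining RX buffers.
 */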
void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);