/*
 * NOTE(import): web-viewer navigation boilerplate removed from this copy.
 * Note: File does not exist in v6.8.
 */
  1/*	Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
  2 *	Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
  3 *	Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
  4 *	Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
  5 *	Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
  6 *	Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
  7 *	Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
  8 *	Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
  9 *	<http://rt2x00.serialmonkey.com>
 10 *
 11 *	This program is free software; you can redistribute it and/or modify
 12 *	it under the terms of the GNU General Public License as published by
 13 *	the Free Software Foundation; either version 2 of the License, or
 14 *	(at your option) any later version.
 15 *
 16 *	This program is distributed in the hope that it will be useful,
 17 *	but WITHOUT ANY WARRANTY; without even the implied warranty of
 18 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 19 *	GNU General Public License for more details.
 20 *
 21 *	You should have received a copy of the GNU General Public License
 22 *	along with this program; if not, see <http://www.gnu.org/licenses/>.
 23 */
 24
 25/*	Module: rt2800mmio
 26 *	Abstract: rt2800 MMIO device routines.
 27 */
 28
 29#include <linux/kernel.h>
 30#include <linux/module.h>
 31#include <linux/export.h>
 32
 33#include "rt2x00.h"
 34#include "rt2x00mmio.h"
 35#include "rt2800.h"
 36#include "rt2800lib.h"
 37#include "rt2800mmio.h"
 38
 39/*
 40 * TX descriptor initialization
 41 */
/*
 * Return a pointer to the TXWI of @entry. On MMIO devices the TXWI is
 * placed at the very start of the skb data (see rt2800mmio_write_tx_desc:
 * SD_PTR0/SD_LEN0 cover exactly the TXWI).
 */
__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
{
	return (__le32 *) entry->skb->data;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
 47
/*
 * Build the MMIO TX descriptor for @entry from @txdesc. The descriptor
 * points the DMA engine at the TXWI (SD_PTR0/SD_LEN0) and at the 802.11
 * frame that follows it in the already-mapped skb (SD_PTR1/SD_LEN1).
 */
void rt2800mmio_write_tx_desc(struct queue_entry *entry,
			      struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *txd = entry_priv->desc;
	u32 word;
	const unsigned int txwi_size = entry->queue->winfo_size;

	/*
	 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
	 * must contain a TXWI structure + 802.11 header + padding + 802.11
	 * data. We choose to have SD_PTR0/SD_LEN0 only contain the TXWI and
	 * SD_PTR1/SD_LEN1 contain 802.11 header + padding + 802.11
	 * data. It means that LAST_SEC0 is always 0.
	 */

	/*
	 * Initialize TX descriptor
	 */
	word = 0;
	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 0, word);

	word = 0;
	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
	/* LAST_SEC1 marks the final segment unless more fragments follow. */
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_BURST,
			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
	/* DMA_DONE cleared: the descriptor now belongs to the hardware. */
	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
	rt2x00_desc_write(txd, 1, word);

	word = 0;
	/* Frame data starts right after the TXWI in the DMA buffer. */
	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
			   skbdesc->skb_dma + txwi_size);
	rt2x00_desc_write(txd, 2, word);

	word = 0;
	/* WIV: let hardware insert the IV unless we already placed one. */
	rt2x00_set_field32(&word, TXD_W3_WIV,
			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
	rt2x00_desc_write(txd, 3, word);

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->desc = txd;
	skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
101
102/*
103 * RX control handlers
104 */
/*
 * Translate the hardware RX descriptor (word 3) of @entry into the
 * generic @rxdesc, then let rt2800lib parse the RXWI that precedes the
 * frame data in the buffer.
 */
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
			    struct rxdone_entry_desc *rxdesc)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *rxd = entry_priv->desc;
	u32 word;

	rt2x00_desc_read(rxd, 3, &word);

	if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

	/*
	 * Unfortunately we don't know the cipher type used during
	 * decryption. This prevents us from providing correct
	 * statistics through debugfs.
	 */
	rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

	if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
		/*
		 * Hardware has stripped IV/EIV data from 802.11 frame during
		 * decryption. Unfortunately the descriptor doesn't contain
		 * any fields with the EIV/IV data either, so they can't
		 * be restored by rt2x00lib.
		 */
		rxdesc->flags |= RX_FLAG_IV_STRIPPED;

		/*
		 * The hardware has already checked the Michael Mic and has
		 * stripped it from the frame. Signal this to mac80211.
		 */
		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
	}

	if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	if (rt2x00_get_field32(word, RXD_W3_L2PAD))
		rxdesc->dev_flags |= RXDONE_L2PAD;

	/*
	 * Process the RXWI structure that is at the start of the buffer.
	 */
	rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
157
158/*
159 * Interrupt functions.
160 */
/*
 * Wake the device from autowake power-save: push a PS config change with
 * an empty conf (flags == 0, i.e. powersave off) through rt2800_config().
 */
static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
{
	struct ieee80211_conf conf = { .flags = 0 };
	struct rt2x00lib_conf libconf = { .conf = &conf };

	rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
168
169static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
170{
171	__le32 *txwi;
172	u32 word;
173	int wcid, tx_wcid;
174
175	wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
176
177	txwi = rt2800_drv_get_txwi(entry);
178	rt2x00_desc_read(txwi, 1, &word);
179	tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
180
181	return (tx_wcid == wcid);
182}
183
/*
 * Queue-walk callback: associate the tx status in @data with the first
 * not-yet-reported frame whose wcid matches the report. Returns true to
 * stop the walk once a match was found.
 */
static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
{
	u32 status = *(u32 *)data;

	/*
	 * rt2800pci hardware might reorder frames when exchanging traffic
	 * with multiple BA enabled STAs.
	 *
	 * For example, a tx queue
	 *    [ STA1 | STA2 | STA1 | STA2 ]
	 * can result in tx status reports
	 *    [ STA1 | STA1 | STA2 | STA2 ]
	 * when the hw decides to aggregate the frames for STA1 into one AMPDU.
	 *
	 * To mitigate this effect, associate the tx status to the first frame
	 * in the tx queue with a matching wcid.
	 */
	if (rt2800mmio_txdone_entry_check(entry, status) &&
	    !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
		/*
		 * Got a matching frame, associate the tx status with
		 * the frame
		 */
		entry->status = status;
		set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
		return true;
	}

	/* Check the next frame */
	return false;
}
215
/*
 * Queue-walk callback (fallback when no wcid matched): assign the tx
 * status in @data to the first frame that has no status yet, regardless
 * of its wcid. Returns true to stop the walk.
 */
static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
{
	u32 status = *(u32 *)data;

	/*
	 * Find the first frame without tx status and assign this status to it
	 * regardless if it matches or not.
	 */
	if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
		/*
		 * Found the first frame without a tx status, associate
		 * this status with it even though the wcid may not match.
		 */
		entry->status = status;
		set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
		return true;
	}

	/* Check the next frame */
	return false;
}
237static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
238					      void *data)
239{
240	if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
241		rt2800_txdone_entry(entry, entry->status,
242				    rt2800mmio_get_txwi(entry));
243		return false;
244	}
245
246	/* No more frames to release */
247	return true;
248}
249
/*
 * Drain up to 16 tx status reports from the status kfifo (filled by
 * rt2800mmio_txstatus_interrupt), associate each report with a frame in
 * the corresponding tx queue and release all completed frames.
 *
 * Returns true when the 16-report budget was exhausted, i.e. more work
 * may be pending and the caller should reschedule the tasklet.
 */
static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	u32 status;
	u8 qid;
	int max_tx_done = 16;

	while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
		qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
		if (unlikely(qid >= QID_RX)) {
			/*
			 * Unknown queue, this shouldn't happen. Just drop
			 * this tx status.
			 */
			rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
				    qid);
			break;
		}

		queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
		if (unlikely(queue == NULL)) {
			/*
			 * The queue is NULL, this shouldn't happen. Stop
			 * processing here and drop the tx status
			 */
			rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
				    qid);
			break;
		}

		if (unlikely(rt2x00queue_empty(queue))) {
			/*
			 * The queue is empty. Stop processing here
			 * and drop the tx status.
			 */
			rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
				    qid);
			break;
		}

		/*
		 * Let's associate this tx status with the first
		 * matching frame.
		 */
		if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
						Q_INDEX, &status,
						rt2800mmio_txdone_find_entry)) {
			/*
			 * We cannot match the tx status to any frame, so just
			 * use the first one.
			 */
			if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
							Q_INDEX, &status,
							rt2800mmio_txdone_match_first)) {
				rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
					    qid);
				break;
			}
		}

		/*
		 * Release all frames with a valid tx status.
		 */
		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
					   Q_INDEX, NULL,
					   rt2800mmio_txdone_release_entries);

		if (--max_tx_done == 0)
			break;
	}

	return !max_tx_done;
}
323
/*
 * Re-enable the single interrupt described by @irq_field in INT_MASK_CSR.
 * Used by the tasklets to restore the interrupt they were scheduled for.
 */
static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
					       struct rt2x00_field32 irq_field)
{
	u32 reg;

	/*
	 * Enable a single interrupt. The interrupt mask register
	 * access needs locking (read-modify-write races with the
	 * interrupt handler otherwise).
	 */
	spin_lock_irq(&rt2x00dev->irqmask_lock);
	rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
	rt2x00_set_field32(&reg, irq_field, 1);
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
339
/*
 * Tasklet: process buffered tx status reports; reschedules itself while
 * rt2800mmio_txdone() reports its per-run budget as exhausted.
 */
void rt2800mmio_txstatus_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	if (rt2800mmio_txdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->txstatus_tasklet);

	/*
	 * No need to enable the tx status interrupt here as we always
	 * leave it enabled to minimize the possibility of a tx status
	 * register overflow. See comment in interrupt handler.
	 */
}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
353
/*
 * Tasklet: forward the pre-TBTT event to rt2x00lib and re-enable the
 * PRE_TBTT interrupt (only while the radio is still enabled).
 */
void rt2800mmio_pretbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	rt2x00lib_pretbtt(rt2x00dev);
	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
362
/*
 * Tasklet: handle a beacon TBTT event. Besides notifying rt2x00lib, it
 * applies a periodic beacon-interval correction in AP mode and then
 * re-enables the TBTT interrupt.
 */
void rt2800mmio_tbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u32 reg;

	rt2x00lib_beacondone(rt2x00dev);

	if (rt2x00dev->intf_ap_count) {
		/*
		 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
		 * causing beacon skew and as a result causing problems with
		 * some powersaving clients over time. Shorten the beacon
		 * interval every 64 beacons by 64us to mitigate this effect.
		 */
		if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
			/* One tick short (interval is in 1/16 TU units). */
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16) - 1);
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		} else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
			/* Restore the nominal beacon interval. */
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16));
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		}
		drv_data->tbtt_tick++;
		drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
	}

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
397
/*
 * Tasklet: process received frames. Reschedules itself while rx work is
 * left, otherwise re-enables the RX_DONE interrupt (radio permitting).
 */
void rt2800mmio_rxdone_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	if (rt2x00mmio_rxdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
	else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
407
/*
 * Tasklet: wake the device after an AUTO_WAKEUP interrupt and re-enable
 * that interrupt (only while the radio is still enabled).
 */
void rt2800mmio_autowake_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	rt2800mmio_wakeup(rt2x00dev);
	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev,
					    INT_MASK_CSR_AUTO_WAKEUP);
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
417
/*
 * Hard-irq path: drain TX_STA_FIFO into the software kfifo and kick the
 * txstatus tasklet. Called from rt2800mmio_interrupt().
 */
static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
{
	u32 status;
	int i;

	/*
	 * The TX_FIFO_STATUS interrupt needs special care. We should
	 * read TX_STA_FIFO but we should do it immediately as otherwise
	 * the register can overflow and we would lose status reports.
	 *
	 * Hence, read the TX_STA_FIFO register and copy all tx status
	 * reports into a kernel FIFO which is handled in the txstatus
	 * tasklet. We use a tasklet to process the tx status reports
	 * because we can schedule the tasklet multiple times (when the
	 * interrupt fires again during tx status processing).
	 *
	 * Furthermore we don't disable the TX_FIFO_STATUS
	 * interrupt here but leave it enabled so that the TX_STA_FIFO
	 * can also be read while the tx status tasklet gets executed.
	 *
	 * Since we have only one producer and one consumer we don't
	 * need to lock the kfifo.
	 */
	for (i = 0; i < rt2x00dev->tx->limit; i++) {
		rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);

		/* An invalid entry means the hardware FIFO is drained. */
		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
			break;

		if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) {
			rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
			break;
		}
	}

	/* Schedule the tasklet for processing the tx status. */
	tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
456
/*
 * Top-level interrupt handler: acknowledge all pending interrupt sources,
 * dispatch the work to tasklets and mask the dispatched interrupts until
 * the respective tasklet re-enables them.
 */
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
	struct rt2x00_dev *rt2x00dev = dev_instance;
	u32 reg, mask;

	/* Read status and ACK all interrupts */
	rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
	rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

	/* No bits set: the interrupt was not for us (shared line). */
	if (!reg)
		return IRQ_NONE;

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return IRQ_HANDLED;

	/*
	 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
	 * for interrupts and interrupt masks we can just use the value of
	 * INT_SOURCE_CSR to create the interrupt mask.
	 */
	mask = ~reg;

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
		rt2800mmio_txstatus_interrupt(rt2x00dev);
		/*
		 * Never disable the TX_FIFO_STATUS interrupt.
		 */
		rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
	}

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
		tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
		tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
		tasklet_schedule(&rt2x00dev->autowake_tasklet);

	/*
	 * Disable all interrupts for which a tasklet was scheduled right now,
	 * the tasklet will reenable the appropriate interrupts.
	 */
	spin_lock(&rt2x00dev->irqmask_lock);
	rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
	reg &= mask;
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock(&rt2x00dev->irqmask_lock);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
512
/*
 * Switch all device interrupts on or off depending on @state
 * (STATE_RADIO_IRQ_ON / STATE_RADIO_IRQ_OFF). When switching off, also
 * wait for any running tasklets to finish.
 */
void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
			   enum dev_state state)
{
	u32 reg;
	unsigned long flags;

	/*
	 * When interrupts are being enabled, the interrupt registers
	 * should clear the register to assure a clean state.
	 */
	if (state == STATE_RADIO_IRQ_ON) {
		rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
		rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
	}

	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
	/* reg == 0 masks everything; set only the wanted bits when enabling. */
	reg = 0;
	if (state == STATE_RADIO_IRQ_ON) {
		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
	}
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

	if (state == STATE_RADIO_IRQ_OFF) {
		/*
		 * Wait for possibly running tasklets to finish.
		 */
		tasklet_kill(&rt2x00dev->txstatus_tasklet);
		tasklet_kill(&rt2x00dev->rxdone_tasklet);
		tasklet_kill(&rt2x00dev->autowake_tasklet);
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
552
553/*
554 * Queue handlers.
555 */
/*
 * Start the given queue: enable the RX engine for QID_RX, or the TSF /
 * TBTT / beacon-generation timers (plus the pre-TBTT timer) for
 * QID_BEACON. TX data queues need no explicit start.
 */
void rt2800mmio_start_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
583
584void rt2800mmio_kick_queue(struct data_queue *queue)
585{
586	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
587	struct queue_entry *entry;
588
589	switch (queue->qid) {
590	case QID_AC_VO:
591	case QID_AC_VI:
592	case QID_AC_BE:
593	case QID_AC_BK:
594		entry = rt2x00queue_get_entry(queue, Q_INDEX);
595		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
596					  entry->entry_idx);
597		break;
598	case QID_MGMT:
599		entry = rt2x00queue_get_entry(queue, Q_INDEX);
600		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
601					  entry->entry_idx);
602		break;
603	default:
604		break;
605	}
606}
607EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
608
/*
 * Stop the given queue: disable the RX engine for QID_RX, or disable the
 * beacon timers for QID_BEACON and wait for any beacon tasklets still
 * running. TX data queues need no explicit stop.
 */
void rt2800mmio_stop_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

		/*
		 * Wait for current invocation to finish. The tasklet
		 * won't be scheduled anymore afterwards since we disabled
		 * the TBTT and PRE TBTT timer.
		 */
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);

		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
645
646void rt2800mmio_queue_init(struct data_queue *queue)
647{
648	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
649	unsigned short txwi_size, rxwi_size;
650
651	rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
652
653	switch (queue->qid) {
654	case QID_RX:
655		queue->limit = 128;
656		queue->data_size = AGGREGATION_SIZE;
657		queue->desc_size = RXD_DESC_SIZE;
658		queue->winfo_size = rxwi_size;
659		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
660		break;
661
662	case QID_AC_VO:
663	case QID_AC_VI:
664	case QID_AC_BE:
665	case QID_AC_BK:
666		queue->limit = 64;
667		queue->data_size = AGGREGATION_SIZE;
668		queue->desc_size = TXD_DESC_SIZE;
669		queue->winfo_size = txwi_size;
670		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
671		break;
672
673	case QID_BEACON:
674		queue->limit = 8;
675		queue->data_size = 0; /* No DMA required for beacons */
676		queue->desc_size = TXD_DESC_SIZE;
677		queue->winfo_size = txwi_size;
678		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
679		break;
680
681	case QID_ATIM:
682		/* fallthrough */
683	default:
684		BUG();
685		break;
686	}
687}
688EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
689
690/*
691 * Initialization functions.
692 */
693bool rt2800mmio_get_entry_state(struct queue_entry *entry)
694{
695	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
696	u32 word;
697
698	if (entry->queue->qid == QID_RX) {
699		rt2x00_desc_read(entry_priv->desc, 1, &word);
700
701		return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
702	} else {
703		rt2x00_desc_read(entry_priv->desc, 1, &word);
704
705		return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
706	}
707}
708EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
709
/*
 * Return @entry to the hardware: for RX, rewrite the DMA address, clear
 * DMA_DONE and advance RX_CRX_IDX; for TX, mark the descriptor done so
 * the slot is considered free.
 */
void rt2800mmio_clear_entry(struct queue_entry *entry)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	u32 word;

	if (entry->queue->qid == QID_RX) {
		rt2x00_desc_read(entry_priv->desc, 0, &word);
		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
		rt2x00_desc_write(entry_priv->desc, 0, word);

		rt2x00_desc_read(entry_priv->desc, 1, &word);
		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
		rt2x00_desc_write(entry_priv->desc, 1, word);

		/*
		 * Set RX IDX in register to inform hardware that we have
		 * handled this entry and it is available for reuse again.
		 */
		rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
					  entry->entry_idx);
	} else {
		rt2x00_desc_read(entry_priv->desc, 1, &word);
		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
		rt2x00_desc_write(entry_priv->desc, 1, word);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
739
/*
 * Program the DMA ring registers: base address, ring size and start
 * indexes for the four TX data rings, the two unused TX rings (4 and 5)
 * and the RX ring. The register names are per-ring, hence the repetition.
 * Always returns 0.
 */
int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_mmio *entry_priv;

	/*
	 * Initialize registers.
	 */
	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
				  rt2x00dev->tx[0].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);

	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
				  rt2x00dev->tx[1].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);

	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
				  rt2x00dev->tx[2].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);

	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
				  rt2x00dev->tx[3].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);

	/* Rings 4 and 5 are not backed by queues; zero them out. */
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);

	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);

	entry_priv = rt2x00dev->rx->entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
				  rt2x00dev->rx[0].limit);
	/*
	 * NOTE(review): CRX_IDX starts at limit - 1, matching the per-entry
	 * index updates done in rt2800mmio_clear_entry().
	 */
	rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
				  rt2x00dev->rx[0].limit - 1);
	rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);

	rt2800_disable_wpdma(rt2x00dev);

	rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
805
/*
 * Bring the MAC into a known clean state: reset all DMA indexes and the
 * packet buffer, apply PCIe clock workarounds on affected chips, and
 * pulse the MAC/BBP reset bits. Always returns 0.
 */
int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/*
	 * Reset DMA indexes
	 */
	rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	/* Reset the packet buffer, then restore normal operation. */
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	if (rt2x00_is_pcie(rt2x00dev) &&
	    (rt2x00_rt(rt2x00dev, RT3090) ||
	     rt2x00_rt(rt2x00dev, RT3390) ||
	     rt2x00_rt(rt2x00dev, RT3572) ||
	     rt2x00_rt(rt2x00dev, RT3593) ||
	     rt2x00_rt(rt2x00dev, RT5390) ||
	     rt2x00_rt(rt2x00dev, RT5392) ||
	     rt2x00_rt(rt2x00dev, RT5592))) {
		/* Force the PCIe clock on these chips to keep DMA reliable. */
		rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
		rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
		rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
		rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
	}

	rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/* Pulse the MAC and BBP reset bits, then release them. */
	reg = 0;
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
852
853/*
854 * Device state switch handlers.
855 */
/*
 * Enable the radio: wait for the WPDMA engine, program the DMA rings and
 * hand over to the common rt2800 enable path. Returns 0 on success or a
 * negative errno (-EIO when queue initialization fails).
 */
int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	/* Wait for DMA, ignore error until we initialize queues. */
	rt2800_wait_wpdma_ready(rt2x00dev);

	if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
		return -EIO;

	return rt2800_enable_radio(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
867
868MODULE_AUTHOR(DRV_PROJECT);
869MODULE_VERSION(DRV_VERSION);
870MODULE_DESCRIPTION("rt2800 MMIO library");
871MODULE_LICENSE("GPL");