v3.1
  1/*
  2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
  3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
  4 *
  5 * Permission to use, copy, modify, and distribute this software for any
  6 * purpose with or without fee is hereby granted, provided that the above
  7 * copyright notice and this permission notice appear in all copies.
  8 *
  9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 16 *
 17 */
 18
 19/********************************************\
 20Queue Control Unit, DFS Control Unit Functions
 21\********************************************/
 22
 23#include "ath5k.h"
 24#include "reg.h"
 25#include "debug.h"
 26#include "base.h"
 27
 28
 29/******************\
 30* Helper functions *
 31\******************/
 32
 33/*
 34 * Get number of pending frames
 35 * for a specific queue [5211+]
 36 */
 37u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
 38{
 39	u32 pending;
 40	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 41
 42	/* Return if queue is declared inactive */
 43	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
 44		return false;
 45
 46	/* XXX: How about AR5K_CFG_TXCNT ? */
 47	if (ah->ah_version == AR5K_AR5210)
 48		return false;
 49
 50	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
 51	pending &= AR5K_QCU_STS_FRMPENDCNT;
 52
 53	/* It's possible to have no frames pending even if TXE
 54	 * is set. To indicate that q has not stopped return
 55	 * true */
 56	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
 57		return true;
 58
 59	return pending;
 60}
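/* Editor's illustration (not part of the driver source): a caller could
 * poll ath5k_hw_num_tx_pending() to wait for a queue to drain before
 * disabling it. Helper name, timeout and delay values are arbitrary. */
static int example_wait_tx_drain(struct ath5k_hw *ah, unsigned int queue)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (!ath5k_hw_num_tx_pending(ah, queue))
			return 0;		/* queue is empty */
		udelay(100);			/* back off and re-check */
	}

	return -EBUSY;				/* frames still pending */
}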
 61
 62/*
 63 * Set a transmit queue inactive
 64 */
 65void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 66{
 67	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
 68		return;
 69
 70	/* This queue will be skipped in further operations */
 71	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
 72	/*For SIMR setup*/
 73	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
 74}
 75
 76/*
 77 * Make sure cw is a power of 2 minus 1 and smaller than 1024
 78 */
 79static u16 ath5k_cw_validate(u16 cw_req)
 80{
 81	u32 cw = 1;
 82	cw_req = min(cw_req, (u16)1023);
 83
 84	while (cw < cw_req)
 85		cw = (cw << 1) | 1;
 86
 87	return cw;
 88}
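/* Editor's note (illustrative): the loop above rounds a requested
 * contention window up to the next (2^n - 1) and clamps at 1023, e.g.
 *	ath5k_cw_validate(0)    returns 1
 *	ath5k_cw_validate(15)   returns 15
 *	ath5k_cw_validate(20)   returns 31
 *	ath5k_cw_validate(2000) returns 1023
 */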
 89
 90/*
 91 * Get properties for a transmit queue
 92 */
 93int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
 94		struct ath5k_txq_info *queue_info)
 95{
 96	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
 97	return 0;
 98}
 99
100/*
101 * Set properties for a transmit queue
102 */
103int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
104				const struct ath5k_txq_info *qinfo)
105{
106	struct ath5k_txq_info *qi;
107
108	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
109
110	qi = &ah->ah_txq[queue];
111
112	if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
113		return -EIO;
114
115	/* copy and validate values */
116	qi->tqi_type = qinfo->tqi_type;
117	qi->tqi_subtype = qinfo->tqi_subtype;
118	qi->tqi_flags = qinfo->tqi_flags;
119	/*
120	 * According to the docs: Although the AIFS field is 8 bit wide,
121	 * the maximum supported value is 0xFC. Setting it higher than that
122	 * will cause the DCU to hang.
123	 */
124	qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
125	qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
126	qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
127	qi->tqi_cbr_period = qinfo->tqi_cbr_period;
128	qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
129	qi->tqi_burst_time = qinfo->tqi_burst_time;
130	qi->tqi_ready_time = qinfo->tqi_ready_time;
131
132	/*XXX: Is this supported on 5210 ?*/
133	/*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
134	if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
135		((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
136		 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
137	     qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
138		qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
139
140	return 0;
141}
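/* Editor's usage sketch (hypothetical helper, field values are examples
 * only): read back a queue's current properties, adjust the contention
 * parameters and write them back through the validation above. */
static int example_tune_queue(struct ath5k_hw *ah, int queue)
{
	struct ath5k_txq_info qinfo;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, queue, &qinfo);
	if (ret)
		return ret;

	qinfo.tqi_aifs = 3;		/* clamped to 0xFC by the setter */
	qinfo.tqi_cw_min = 15;		/* passed through ath5k_cw_validate() */
	qinfo.tqi_cw_max = 1023;

	return ath5k_hw_set_tx_queueprops(ah, queue, &qinfo);
}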
142
143/*
144 * Initialize a transmit queue
145 */
146int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
147		struct ath5k_txq_info *queue_info)
148{
149	unsigned int queue;
150	int ret;
151
152	/*
153	 * Get queue by type
154	 */
155	/* 5210 only has 2 queues */
156	if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
157		switch (queue_type) {
158		case AR5K_TX_QUEUE_DATA:
159			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
160			break;
161		case AR5K_TX_QUEUE_BEACON:
162		case AR5K_TX_QUEUE_CAB:
163			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
164			break;
165		default:
166			return -EINVAL;
167		}
168	} else {
169		switch (queue_type) {
170		case AR5K_TX_QUEUE_DATA:
171			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
172				ah->ah_txq[queue].tqi_type !=
173				AR5K_TX_QUEUE_INACTIVE; queue++) {
174
175				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
176					return -EINVAL;
177			}
178			break;
179		case AR5K_TX_QUEUE_UAPSD:
180			queue = AR5K_TX_QUEUE_ID_UAPSD;
181			break;
182		case AR5K_TX_QUEUE_BEACON:
183			queue = AR5K_TX_QUEUE_ID_BEACON;
184			break;
185		case AR5K_TX_QUEUE_CAB:
186			queue = AR5K_TX_QUEUE_ID_CAB;
187			break;
188		case AR5K_TX_QUEUE_XR_DATA:
189			if (ah->ah_version != AR5K_AR5212)
190				ATH5K_ERR(ah,
191					"XR data queues only supported in"
192					" 5212!\n");
193			queue = AR5K_TX_QUEUE_ID_XR_DATA;
194			break;
195		default:
196			return -EINVAL;
197		}
198	}
199
200	/*
201	 * Setup internal queue structure
202	 */
203	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
204	ah->ah_txq[queue].tqi_type = queue_type;
205
206	if (queue_info != NULL) {
207		queue_info->tqi_type = queue_type;
208		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
209		if (ret)
210			return ret;
211	}
212
213	/*
214	 * We use ah_txq_status to hold a temp value for
215	 * the Secondary interrupt mask registers on 5211+
216	 * check out ath5k_hw_reset_tx_queue
217	 */
218	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
219
220	return queue;
221}
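/* Editor's illustration (not part of the driver source): on success
 * ath5k_hw_setup_tx_queue() returns the allocated hw queue number, so a
 * caller typically keeps that number and later resets the queue to push
 * the configuration to the hardware. Helper name and values are examples. */
static int example_add_data_queue(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qinfo = {
		.tqi_aifs	= 2,
		.tqi_cw_min	= 15,
		.tqi_cw_max	= 1023,
	};
	int qnum;

	qnum = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qinfo);
	if (qnum < 0)
		return qnum;

	return ath5k_hw_reset_tx_queue(ah, qnum);
}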
222
223
224/*******************************\
225* Single QCU/DCU initialization *
226\*******************************/
227
228/*
229 * Set tx retry limits on DCU
230 */
231void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
232				  unsigned int queue)
233{
234	/* Single data queue on AR5210 */
235	if (ah->ah_version == AR5K_AR5210) {
236		struct ath5k_txq_info *tq = &ah->ah_txq[queue];
237
238		if (queue > 0)
239			return;
240
241		ath5k_hw_reg_write(ah,
242			(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
243			| AR5K_REG_SM(ah->ah_retry_long,
244				      AR5K_NODCU_RETRY_LMT_SLG_RETRY)
245			| AR5K_REG_SM(ah->ah_retry_short,
246				      AR5K_NODCU_RETRY_LMT_SSH_RETRY)
247			| AR5K_REG_SM(ah->ah_retry_long,
248				      AR5K_NODCU_RETRY_LMT_LG_RETRY)
249			| AR5K_REG_SM(ah->ah_retry_short,
250				      AR5K_NODCU_RETRY_LMT_SH_RETRY),
251			AR5K_NODCU_RETRY_LMT);
252	/* DCU on AR5211+ */
253	} else {
254		ath5k_hw_reg_write(ah,
255			AR5K_REG_SM(ah->ah_retry_long,
256				    AR5K_DCU_RETRY_LMT_RTS)
257			| AR5K_REG_SM(ah->ah_retry_long,
258				      AR5K_DCU_RETRY_LMT_STA_RTS)
259			| AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
260				      AR5K_DCU_RETRY_LMT_STA_DATA),
261			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
262	}
263}
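/* Editor's note: AR5K_REG_SM(val, FIELD) (defined in ath5k.h) shifts val
 * by FIELD_S and masks it with FIELD, so the writes above simply pack the
 * short/long retry counts into their subfields of a single register. */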
264
265/**
266 * ath5k_hw_reset_tx_queue - Initialize a single hw queue
267 *
268 * @ah The &struct ath5k_hw
269 * @queue The hw queue number
270 *
271 * Set DFS properties for the given transmit queue on DCU
272 * and configures all queue-specific parameters.
273 */
274int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
275{
276	struct ath5k_txq_info *tq = &ah->ah_txq[queue];
277
278	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
279
280	tq = &ah->ah_txq[queue];
281
282	/* Skip if queue inactive or if we are on AR5210
283	 * that doesn't have QCU/DCU */
284	if ((ah->ah_version == AR5K_AR5210) ||
285	(tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
286		return 0;
287
288	/*
289	 * Set contention window (cw_min/cw_max)
290	 * and arbitrated interframe space (aifs)...
291	 */
292	ath5k_hw_reg_write(ah,
293		AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
294		AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
295		AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
296		AR5K_QUEUE_DFS_LOCAL_IFS(queue));
297
298	/*
299	 * Set tx retry limits for this queue
300	 */
301	ath5k_hw_set_tx_retry_limits(ah, queue);
302
303
304	/*
305	 * Set misc registers
306	 */
307
308	/* Enable DCU to wait for next fragment from QCU */
309	AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
310				AR5K_DCU_MISC_FRAG_WAIT);
311
312	/* On Maui and Spirit use the global seqnum on DCU */
313	if (ah->ah_mac_version < AR5K_SREV_AR5211)
314		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
315					AR5K_DCU_MISC_SEQNUM_CTL);
316
317	/* Constant bit rate period */
318	if (tq->tqi_cbr_period) {
319		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
320					AR5K_QCU_CBRCFG_INTVAL) |
321					AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
322					AR5K_QCU_CBRCFG_ORN_THRES),
323					AR5K_QUEUE_CBRCFG(queue));
324
325		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
326					AR5K_QCU_MISC_FRSHED_CBR);
327
328		if (tq->tqi_cbr_overflow_limit)
329			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
330					AR5K_QCU_MISC_CBR_THRES_ENABLE);
331	}
332
333	/* Ready time interval */
334	if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
335		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
336					AR5K_QCU_RDYTIMECFG_INTVAL) |
337					AR5K_QCU_RDYTIMECFG_ENABLE,
338					AR5K_QUEUE_RDYTIMECFG(queue));
339
340	if (tq->tqi_burst_time) {
341		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
342					AR5K_DCU_CHAN_TIME_DUR) |
343					AR5K_DCU_CHAN_TIME_ENABLE,
344					AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
345
346		if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
347			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
348					AR5K_QCU_MISC_RDY_VEOL_POLICY);
349	}
350
351	/* Enable/disable Post frame backoff */
352	if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
353		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
354					AR5K_QUEUE_DFS_MISC(queue));
355
356	/* Enable/disable fragmentation burst backoff */
357	if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
358		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
359					AR5K_QUEUE_DFS_MISC(queue));
360
361	/*
362	 * Set registers by queue type
363	 */
364	switch (tq->tqi_type) {
365	case AR5K_TX_QUEUE_BEACON:
366		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
367				AR5K_QCU_MISC_FRSHED_DBA_GT |
368				AR5K_QCU_MISC_CBREXP_BCN_DIS |
369				AR5K_QCU_MISC_BCN_ENABLE);
370
371		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
372				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
373				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
374				AR5K_DCU_MISC_ARBLOCK_IGNORE |
375				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
376				AR5K_DCU_MISC_BCN_ENABLE);
377		break;
378
379	case AR5K_TX_QUEUE_CAB:
380		/* XXX: use BCN_SENT_GT, if we can figure out how */
381		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
382					AR5K_QCU_MISC_FRSHED_DBA_GT |
383					AR5K_QCU_MISC_CBREXP_DIS |
384					AR5K_QCU_MISC_CBREXP_BCN_DIS);
385
386		ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
387					(AR5K_TUNE_SW_BEACON_RESP -
388					AR5K_TUNE_DMA_BEACON_RESP) -
389				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
390					AR5K_QCU_RDYTIMECFG_ENABLE,
391					AR5K_QUEUE_RDYTIMECFG(queue));
392
393		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
394					(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
395					AR5K_DCU_MISC_ARBLOCK_CTL_S));
396		break;
397
398	case AR5K_TX_QUEUE_UAPSD:
399		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
400					AR5K_QCU_MISC_CBREXP_DIS);
401		break;
402
403	case AR5K_TX_QUEUE_DATA:
404	default:
405			break;
406	}
407
408	/* TODO: Handle frame compression */
409
410	/*
411	 * Enable interrupts for this tx queue
412	 * in the secondary interrupt mask registers
413	 */
414	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
415		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
416
417	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
418		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
419
420	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
421		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
422
423	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
424		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
425
426	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
427		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
428
429	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
430		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
431
432	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
433		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
434
435	if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
436		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
437
438	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
439		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
440
441	/* Update secondary interrupt mask registers */
442
443	/* Filter out inactive queues */
444	ah->ah_txq_imr_txok &= ah->ah_txq_status;
445	ah->ah_txq_imr_txerr &= ah->ah_txq_status;
446	ah->ah_txq_imr_txurn &= ah->ah_txq_status;
447	ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
448	ah->ah_txq_imr_txeol &= ah->ah_txq_status;
449	ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
450	ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
451	ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
452	ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
453
454	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
455					AR5K_SIMR0_QCU_TXOK) |
456					AR5K_REG_SM(ah->ah_txq_imr_txdesc,
457					AR5K_SIMR0_QCU_TXDESC),
458					AR5K_SIMR0);
459
460	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
461					AR5K_SIMR1_QCU_TXERR) |
462					AR5K_REG_SM(ah->ah_txq_imr_txeol,
463					AR5K_SIMR1_QCU_TXEOL),
464					AR5K_SIMR1);
465
466	/* Update SIMR2 but don't overwrite rest simr2 settings */
467	AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
468	AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
469				AR5K_REG_SM(ah->ah_txq_imr_txurn,
470				AR5K_SIMR2_QCU_TXURN));
471
472	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
473				AR5K_SIMR3_QCBRORN) |
474				AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
475				AR5K_SIMR3_QCBRURN),
476				AR5K_SIMR3);
477
478	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
479				AR5K_SIMR4_QTRIG), AR5K_SIMR4);
480
481	/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
482	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
483				AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
484
485	/* No queue has TXNOFRM enabled, disable the interrupt
486	 * by setting AR5K_TXNOFRM to zero */
487	if (ah->ah_txq_imr_nofrm == 0)
488		ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
489
490	/* Set QCU mask for this DCU to save power */
491	AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
492
493	return 0;
494}
495
496
497/**************************\
498* Global QCU/DCU functions *
499\**************************/
500
501/**
502 * ath5k_hw_set_ifs_intervals  - Set global inter-frame spaces on DCU
503 *
504 * @ah The &struct ath5k_hw
505 * @slot_time Slot time in us
506 *
507 * Sets the global IFS intervals on DCU (also works on AR5210) for
508 * the given slot time and the current bwmode.
509 */
510int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
511{
512	struct ieee80211_channel *channel = ah->ah_current_channel;
513	struct ieee80211_rate *rate;
514	u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
515	u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
516
517	if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
518		return -EINVAL;
519
520	sifs = ath5k_hw_get_default_sifs(ah);
521	sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);
522
523	/* EIFS
524	 * Txtime of ack at lowest rate + SIFS + DIFS
525	 * (DIFS = SIFS + 2 * Slot time)
526	 *
527	 * Note: HAL has some predefined values for EIFS
528	 * Turbo:   (37 + 2 * 6)
529	 * Default: (74 + 2 * 9)
530	 * Half:    (149 + 2 * 13)
531	 * Quarter: (298 + 2 * 21)
532	 *
533	 * (74 + 2 * 6) for AR5210 default and turbo !
534	 *
535	 * According to the formula we have
536	 * ack_tx_time = 25 for turbo and
537	 * ack_tx_time = 42.5 * clock multiplier
538	 * for default/half/quarter.
539	 *
540	 * This can't be right, 42 is what we would get
541	 * from ath5k_hw_get_frame_dur_for_bwmode or
542	 * ieee80211_generic_frame_duration for zero frame
543	 * length and without SIFS !
544	 *
545	 * Also we have different lowest rate for 802.11a
546	 */
547	if (channel->hw_value & CHANNEL_5GHZ)
548		rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
549	else
550		rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
551
552	ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
553
554	/* ack_tx_time includes an SIFS already */
555	eifs = ack_tx_time + sifs + 2 * slot_time;
556	eifs_clock = ath5k_hw_htoclock(ah, eifs);
557
558	/* Set IFS settings on AR5210 */
559	if (ah->ah_version == AR5K_AR5210) {
560		u32 pifs, pifs_clock, difs, difs_clock;
561
562		/* Set slot time */
563		ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
564
565		/* Set EIFS */
566		eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);
567
568		/* PIFS = Slot time + SIFS */
569		pifs = slot_time + sifs;
570		pifs_clock = ath5k_hw_htoclock(ah, pifs);
571		pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);
572
573		/* DIFS = SIFS + 2 * Slot time */
574		difs = sifs + 2 * slot_time;
575		difs_clock = ath5k_hw_htoclock(ah, difs);
576
577		/* Set SIFS/DIFS */
578		ath5k_hw_reg_write(ah, (difs_clock <<
579				AR5K_IFS0_DIFS_S) | sifs_clock,
580				AR5K_IFS0);
581
582		/* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
583		ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
584				(AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
585				AR5K_IFS1);
586
587		return 0;
588	}
589
590	/* Set IFS slot time */
591	ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
592
593	/* Set EIFS interval */
594	ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);
595
596	/* Set SIFS interval in usecs */
597	AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
598				AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
599				sifs);
600
601	/* Set SIFS interval in clock cycles */
602	ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);
603
604	return 0;
605}
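/* Editor's worked example (values follow the formulas above, assuming
 * standard 802.11a timing: slot_time = 9us, SIFS = 16us):
 *	PIFS = slot_time + SIFS     = 9 + 16     = 25us
 *	DIFS = SIFS + 2 * slot_time = 16 + 2 * 9 = 34us
 *	EIFS = ack_tx_time + SIFS + DIFS
 * each interval is then converted to clock cycles via ath5k_hw_htoclock()
 * before being written to the IFS registers. */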
606
607
608int ath5k_hw_init_queues(struct ath5k_hw *ah)
609{
610	int i, ret;
611
612	/* TODO: HW Compression support for data queues */
613	/* TODO: Burst prefetch for data queues */
614
615	/*
616	 * Reset queues and start beacon timers at the end of the reset routine
617	 * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
618	 * Note: If we want we can assign multiple qcus on one dcu.
619	 */
620	if (ah->ah_version != AR5K_AR5210)
621		for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
622			ret = ath5k_hw_reset_tx_queue(ah, i);
623			if (ret) {
624				ATH5K_ERR(ah,
625					"failed to reset TX queue #%d\n", i);
626				return ret;
627			}
628		}
629	else
630		/* No QCU/DCU on AR5210, just set tx
631		 * retry limits. We set IFS parameters
632		 * on ath5k_hw_set_ifs_intervals */
633		ath5k_hw_set_tx_retry_limits(ah, 0);
634
635	/* Set the turbo flag when operating on 40MHz */
636	if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
637		AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
638				AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
639
640	/* If we didn't set IFS timings through
641	 * ath5k_hw_set_coverage_class make sure
642	 * we set them here */
643	if (!ah->ah_coverage_class) {
644		unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
645		ath5k_hw_set_ifs_intervals(ah, slot_time);
646	}
647
648	return 0;
649}
v3.5.6
  1/*
  2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
  3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
  4 *
  5 * Permission to use, copy, modify, and distribute this software for any
  6 * purpose with or without fee is hereby granted, provided that the above
  7 * copyright notice and this permission notice appear in all copies.
  8 *
  9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 16 *
 17 */
 18
 19/********************************************\
 20Queue Control Unit, DCF Control Unit Functions
 21\********************************************/
 22
 23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 24
 25#include "ath5k.h"
 26#include "reg.h"
 27#include "debug.h"
 28#include <linux/log2.h>
 29
 30/**
 31 * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
 32 *
 33 * Here we setup parameters for the 12 available TX queues. Note that
 34 * on the various registers we can usually only map the first 10 of them so
 35 * basically we have 10 queues to play with. Each queue has a matching
 36 * QCU that controls when the queue will get triggered and multiple QCUs
 37 * can be mapped to a single DCU that controls the various DFS parameters
 38 * for the various queues. In our setup we have a 1:1 mapping between QCUs
 39 * and DCUs allowing us to have different DFS settings for each queue.
 40 *
 41 * When a frame goes into a TX queue, QCU decides when it'll trigger a
 42 * transmission based on various criteria (such as how many data we have inside
 43 * it's buffer or -if it's a beacon queue- if it's time to fire up the queue
 44 * based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler
 45 * (arbitrator) decides the priority of each QCU based on it's configuration
 46 * (e.g. beacons are always transmitted when they leave DCU bypassing all other
 47 * frames from other queues waiting to be transmitted). After a frame leaves
 48 * the DCU it goes to PCU for further processing and then to PHY for
 49 * the actual transmission.
 50 */
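/* Editor's sketch of the path described above (illustrative only):
 *
 *	frame -> QCU (decides when to trigger transmission)
 *	      -> DCU (adds backoff, IFS, retries)
 *	      -> arbitrator (schedules DCUs by priority)
 *	      -> PCU -> PHY (actual transmission)
 */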
 51
 52
 53/******************\
 54* Helper functions *
 55\******************/
 56
 57/**
 58 * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
 59 * @ah: The &struct ath5k_hw
 60 * @queue: One of enum ath5k_tx_queue_id
 61 */
 62u32
 63ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
 64{
 65	u32 pending;
 66	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 67
 68	/* Return if queue is declared inactive */
 69	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
 70		return false;
 71
 72	/* XXX: How about AR5K_CFG_TXCNT ? */
 73	if (ah->ah_version == AR5K_AR5210)
 74		return false;
 75
 76	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
 77	pending &= AR5K_QCU_STS_FRMPENDCNT;
 78
 79	/* It's possible to have no frames pending even if TXE
 80	 * is set. To indicate that q has not stopped return
 81	 * true */
 82	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
 83		return true;
 84
 85	return pending;
 86}
 87
 88/**
 89 * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
 90 * @ah: The &struct ath5k_hw
 91 * @queue: One of enum ath5k_tx_queue_id
 92 */
 93void
 94ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 95{
 96	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
 97		return;
 98
 99	/* This queue will be skipped in further operations */
100	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
101	/*For SIMR setup*/
102	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
103}
104
105/**
106 * ath5k_cw_validate() - Make sure the given cw is valid
107 * @cw_req: The contention window value to check
108 *
109 * Make sure cw is a power of 2 minus 1 and smaller than 1024
110 */
111static u16
112ath5k_cw_validate(u16 cw_req)
113{
114	cw_req = min(cw_req, (u16)1023);
115
116	/* Check if cw_req + 1 a power of 2 */
117	if (is_power_of_2(cw_req + 1))
118		return cw_req;
119
120	/* Check if cw_req is a power of 2 */
121	if (is_power_of_2(cw_req))
122		return cw_req - 1;
123
124	/* If none of the above is correct
125	 * find the closest power of 2 */
126	cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
127
128	return cw_req;
129}
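/* Editor's note (illustrative): examples of the mapping implemented above:
 *	ath5k_cw_validate(15)   returns 15   (15 + 1 is a power of 2)
 *	ath5k_cw_validate(16)   returns 15   (16 itself is a power of 2)
 *	ath5k_cw_validate(20)   returns 31   (rounded up to 32, minus 1)
 *	ath5k_cw_validate(2000) returns 1023 (clamped to 1023 first)
 */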
130
131/**
132 * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
133 * @ah: The &struct ath5k_hw
134 * @queue: One of enum ath5k_tx_queue_id
135 * @queue_info: The &struct ath5k_txq_info to fill
136 */
137int
138ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
139		struct ath5k_txq_info *queue_info)
140{
141	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
142	return 0;
143}
144
145/**
146 * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
147 * @ah: The &struct ath5k_hw
148 * @queue: One of enum ath5k_tx_queue_id
149 * @qinfo: The &struct ath5k_txq_info to use
150 *
151 * Returns 0 on success or -EIO if queue is inactive
152 */
153int
154ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
155				const struct ath5k_txq_info *qinfo)
156{
157	struct ath5k_txq_info *qi;
158
159	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
160
161	qi = &ah->ah_txq[queue];
162
163	if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
164		return -EIO;
165
166	/* copy and validate values */
167	qi->tqi_type = qinfo->tqi_type;
168	qi->tqi_subtype = qinfo->tqi_subtype;
169	qi->tqi_flags = qinfo->tqi_flags;
170	/*
171	 * According to the docs: Although the AIFS field is 8 bit wide,
172	 * the maximum supported value is 0xFC. Setting it higher than that
173	 * will cause the DCU to hang.
174	 */
175	qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
176	qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
177	qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
178	qi->tqi_cbr_period = qinfo->tqi_cbr_period;
179	qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
180	qi->tqi_burst_time = qinfo->tqi_burst_time;
181	qi->tqi_ready_time = qinfo->tqi_ready_time;
182
183	/*XXX: Is this supported on 5210 ?*/
184	/*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
185	if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
186		((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
187		 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
188	     qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
189		qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
190
191	return 0;
192}
193
194/**
195 * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
196 * @ah: The &struct ath5k_hw
197 * @queue_type: One of enum ath5k_tx_queue
198 * @queue_info: The &struct ath5k_txq_info to use
199 *
200 * Returns 0 on success, -EINVAL on invalid arguments
201 */
202int
203ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
204		struct ath5k_txq_info *queue_info)
205{
206	unsigned int queue;
207	int ret;
208
209	/*
210	 * Get queue by type
211	 */
212	/* 5210 only has 2 queues */
213	if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
214		switch (queue_type) {
215		case AR5K_TX_QUEUE_DATA:
216			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
217			break;
218		case AR5K_TX_QUEUE_BEACON:
219		case AR5K_TX_QUEUE_CAB:
220			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
221			break;
222		default:
223			return -EINVAL;
224		}
225	} else {
226		switch (queue_type) {
227		case AR5K_TX_QUEUE_DATA:
228			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
229				ah->ah_txq[queue].tqi_type !=
230				AR5K_TX_QUEUE_INACTIVE; queue++) {
231
232				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
233					return -EINVAL;
234			}
235			break;
236		case AR5K_TX_QUEUE_UAPSD:
237			queue = AR5K_TX_QUEUE_ID_UAPSD;
238			break;
239		case AR5K_TX_QUEUE_BEACON:
240			queue = AR5K_TX_QUEUE_ID_BEACON;
241			break;
242		case AR5K_TX_QUEUE_CAB:
243			queue = AR5K_TX_QUEUE_ID_CAB;
244			break;
245		default:
246			return -EINVAL;
247		}
248	}
249
250	/*
251	 * Setup internal queue structure
252	 */
253	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
254	ah->ah_txq[queue].tqi_type = queue_type;
255
256	if (queue_info != NULL) {
257		queue_info->tqi_type = queue_type;
258		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
259		if (ret)
260			return ret;
261	}
262
263	/*
264	 * We use ah_txq_status to hold a temp value for
265	 * the Secondary interrupt mask registers on 5211+
266	 * check out ath5k_hw_reset_tx_queue
267	 */
268	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
269
270	return queue;
271}
272
273
274/*******************************\
275* Single QCU/DCU initialization *
276\*******************************/
277
278/**
279 * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
280 * @ah: The &struct ath5k_hw
281 * @queue: One of enum ath5k_tx_queue_id
282 *
283 * This function is used when initializing a queue, to set
284 * retry limits based on ah->ah_retry_* and the chipset used.
285 */
286void
287ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
288				  unsigned int queue)
289{
290	/* Single data queue on AR5210 */
291	if (ah->ah_version == AR5K_AR5210) {
292		struct ath5k_txq_info *tq = &ah->ah_txq[queue];
293
294		if (queue > 0)
295			return;
296
297		ath5k_hw_reg_write(ah,
298			(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
299			| AR5K_REG_SM(ah->ah_retry_long,
300				      AR5K_NODCU_RETRY_LMT_SLG_RETRY)
301			| AR5K_REG_SM(ah->ah_retry_short,
302				      AR5K_NODCU_RETRY_LMT_SSH_RETRY)
303			| AR5K_REG_SM(ah->ah_retry_long,
304				      AR5K_NODCU_RETRY_LMT_LG_RETRY)
305			| AR5K_REG_SM(ah->ah_retry_short,
306				      AR5K_NODCU_RETRY_LMT_SH_RETRY),
307			AR5K_NODCU_RETRY_LMT);
308	/* DCU on AR5211+ */
309	} else {
310		ath5k_hw_reg_write(ah,
311			AR5K_REG_SM(ah->ah_retry_long,
312				    AR5K_DCU_RETRY_LMT_RTS)
313			| AR5K_REG_SM(ah->ah_retry_long,
314				      AR5K_DCU_RETRY_LMT_STA_RTS)
315			| AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
316				      AR5K_DCU_RETRY_LMT_STA_DATA),
317			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
318	}
319}
320
321/**
322 * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
323 * @ah: The &struct ath5k_hw
324 * @queue: One of enum ath5k_tx_queue_id
325 *
326 * Set DCF properties for the given transmit queue on DCU
327 * and configures all queue-specific parameters.
328 */
329int
330ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
331{
332	struct ath5k_txq_info *tq = &ah->ah_txq[queue];
333
334	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
335
336	tq = &ah->ah_txq[queue];
337
338	/* Skip if queue inactive or if we are on AR5210
339	 * that doesn't have QCU/DCU */
340	if ((ah->ah_version == AR5K_AR5210) ||
341	(tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
342		return 0;
343
344	/*
345	 * Set contention window (cw_min/cw_max)
346	 * and arbitrated interframe space (aifs)...
347	 */
348	ath5k_hw_reg_write(ah,
349		AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
350		AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
351		AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
352		AR5K_QUEUE_DFS_LOCAL_IFS(queue));
353
354	/*
355	 * Set tx retry limits for this queue
356	 */
357	ath5k_hw_set_tx_retry_limits(ah, queue);
358
359
360	/*
361	 * Set misc registers
362	 */
363
364	/* Enable DCU to wait for next fragment from QCU */
365	AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
366				AR5K_DCU_MISC_FRAG_WAIT);
367
368	/* On Maui and Spirit use the global seqnum on DCU */
369	if (ah->ah_mac_version < AR5K_SREV_AR5211)
370		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
371					AR5K_DCU_MISC_SEQNUM_CTL);
372
373	/* Constant bit rate period */
374	if (tq->tqi_cbr_period) {
375		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
376					AR5K_QCU_CBRCFG_INTVAL) |
377					AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
378					AR5K_QCU_CBRCFG_ORN_THRES),
379					AR5K_QUEUE_CBRCFG(queue));
380
381		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
382					AR5K_QCU_MISC_FRSHED_CBR);
383
384		if (tq->tqi_cbr_overflow_limit)
385			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
386					AR5K_QCU_MISC_CBR_THRES_ENABLE);
387	}
388
389	/* Ready time interval */
390	if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
391		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
392					AR5K_QCU_RDYTIMECFG_INTVAL) |
393					AR5K_QCU_RDYTIMECFG_ENABLE,
394					AR5K_QUEUE_RDYTIMECFG(queue));
395
396	if (tq->tqi_burst_time) {
397		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
398					AR5K_DCU_CHAN_TIME_DUR) |
399					AR5K_DCU_CHAN_TIME_ENABLE,
400					AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
401
402		if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
403			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
404					AR5K_QCU_MISC_RDY_VEOL_POLICY);
405	}
406
407	/* Enable/disable Post frame backoff */
408	if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
409		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
410					AR5K_QUEUE_DFS_MISC(queue));
411
412	/* Enable/disable fragmentation burst backoff */
413	if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
414		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
415					AR5K_QUEUE_DFS_MISC(queue));
416
417	/*
418	 * Set registers by queue type
419	 */
420	switch (tq->tqi_type) {
421	case AR5K_TX_QUEUE_BEACON:
422		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
423				AR5K_QCU_MISC_FRSHED_DBA_GT |
424				AR5K_QCU_MISC_CBREXP_BCN_DIS |
425				AR5K_QCU_MISC_BCN_ENABLE);
426
427		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
428				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
429				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
430				AR5K_DCU_MISC_ARBLOCK_IGNORE |
431				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
432				AR5K_DCU_MISC_BCN_ENABLE);
433		break;
434
435	case AR5K_TX_QUEUE_CAB:
436		/* XXX: use BCN_SENT_GT, if we can figure out how */
437		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
438					AR5K_QCU_MISC_FRSHED_DBA_GT |
439					AR5K_QCU_MISC_CBREXP_DIS |
440					AR5K_QCU_MISC_CBREXP_BCN_DIS);
441
442		ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
443					(AR5K_TUNE_SW_BEACON_RESP -
444					AR5K_TUNE_DMA_BEACON_RESP) -
445				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
446					AR5K_QCU_RDYTIMECFG_ENABLE,
447					AR5K_QUEUE_RDYTIMECFG(queue));
448
449		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
450					(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
451					AR5K_DCU_MISC_ARBLOCK_CTL_S));
452		break;
453
454	case AR5K_TX_QUEUE_UAPSD:
455		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
456					AR5K_QCU_MISC_CBREXP_DIS);
457		break;
458
459	case AR5K_TX_QUEUE_DATA:
460	default:
461			break;
462	}
463
464	/* TODO: Handle frame compression */
465
466	/*
467	 * Enable interrupts for this tx queue
468	 * in the secondary interrupt mask registers
469	 */
470	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
471		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
472
473	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
474		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
475
476	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
477		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
478
479	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
480		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
481
482	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
483		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
484
485	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
486		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
487
488	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
489		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
490
491	if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
492		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
493
494	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
495		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
496
497	/* Update secondary interrupt mask registers */
498
499	/* Filter out inactive queues */
500	ah->ah_txq_imr_txok &= ah->ah_txq_status;
501	ah->ah_txq_imr_txerr &= ah->ah_txq_status;
502	ah->ah_txq_imr_txurn &= ah->ah_txq_status;
503	ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
504	ah->ah_txq_imr_txeol &= ah->ah_txq_status;
505	ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
506	ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
507	ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
508	ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
509
510	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
511					AR5K_SIMR0_QCU_TXOK) |
512					AR5K_REG_SM(ah->ah_txq_imr_txdesc,
513					AR5K_SIMR0_QCU_TXDESC),
514					AR5K_SIMR0);
515
516	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
517					AR5K_SIMR1_QCU_TXERR) |
518					AR5K_REG_SM(ah->ah_txq_imr_txeol,
519					AR5K_SIMR1_QCU_TXEOL),
520					AR5K_SIMR1);
521
522	/* Update SIMR2 but don't overwrite rest simr2 settings */
523	AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
524	AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
525				AR5K_REG_SM(ah->ah_txq_imr_txurn,
526				AR5K_SIMR2_QCU_TXURN));
527
528	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
529				AR5K_SIMR3_QCBRORN) |
530				AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
531				AR5K_SIMR3_QCBRURN),
532				AR5K_SIMR3);
533
534	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
535				AR5K_SIMR4_QTRIG), AR5K_SIMR4);
536
537	/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
538	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
539				AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
540
541	/* No queue has TXNOFRM enabled, disable the interrupt
542	 * by setting AR5K_TXNOFRM to zero */
543	if (ah->ah_txq_imr_nofrm == 0)
544		ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
545
546	/* Set QCU mask for this DCU to save power */
547	AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
548
549	return 0;
550}
551
552
553/**************************\
554* Global QCU/DCU functions *
555\**************************/
556
557/**
558 * ath5k_hw_set_ifs_intervals()  - Set global inter-frame spaces on DCU
559 * @ah: The &struct ath5k_hw
560 * @slot_time: Slot time in us
561 *
562 * Sets the global IFS intervals on DCU (also works on AR5210) for
563 * the given slot time and the current bwmode.
564 */
565int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
566{
567	struct ieee80211_channel *channel = ah->ah_current_channel;
568	enum ieee80211_band band;
569	struct ieee80211_rate *rate;
570	u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
571	u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
572
573	if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
574		return -EINVAL;
575
576	sifs = ath5k_hw_get_default_sifs(ah);
577	sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);
578
579	/* EIFS
580	 * Txtime of ack at lowest rate + SIFS + DIFS
581	 * (DIFS = SIFS + 2 * Slot time)
582	 *
583	 * Note: HAL has some predefined values for EIFS
584	 * Turbo:   (37 + 2 * 6)
585	 * Default: (74 + 2 * 9)
586	 * Half:    (149 + 2 * 13)
587	 * Quarter: (298 + 2 * 21)
588	 *
589	 * (74 + 2 * 6) for AR5210 default and turbo !
590	 *
591	 * According to the formula we have
592	 * ack_tx_time = 25 for turbo and
593	 * ack_tx_time = 42.5 * clock multiplier
594	 * for default/half/quarter.
595	 *
596	 * This can't be right, 42 is what we would get
597	 * from ath5k_hw_get_frame_dur_for_bwmode or
598	 * ieee80211_generic_frame_duration for zero frame
599	 * length and without SIFS !
600	 *
601	 * Also we have different lowest rate for 802.11a
602	 */
603	if (channel->band == IEEE80211_BAND_5GHZ)
604		band = IEEE80211_BAND_5GHZ;
605	else
606		band = IEEE80211_BAND_2GHZ;
607
608	rate = &ah->sbands[band].bitrates[0];
609	ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
610
611	/* ack_tx_time includes an SIFS already */
612	eifs = ack_tx_time + sifs + 2 * slot_time;
613	eifs_clock = ath5k_hw_htoclock(ah, eifs);
614
615	/* Set IFS settings on AR5210 */
616	if (ah->ah_version == AR5K_AR5210) {
617		u32 pifs, pifs_clock, difs, difs_clock;
618
619		/* Set slot time */
620		ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
621
622		/* Set EIFS */
623		eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);
624
625		/* PIFS = Slot time + SIFS */
626		pifs = slot_time + sifs;
627		pifs_clock = ath5k_hw_htoclock(ah, pifs);
628		pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);
629
630		/* DIFS = SIFS + 2 * Slot time */
631		difs = sifs + 2 * slot_time;
632		difs_clock = ath5k_hw_htoclock(ah, difs);
633
634		/* Set SIFS/DIFS */
635		ath5k_hw_reg_write(ah, (difs_clock <<
636				AR5K_IFS0_DIFS_S) | sifs_clock,
637				AR5K_IFS0);
638
639		/* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
640		ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
641				(AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
642				AR5K_IFS1);
643
644		return 0;
645	}
646
647	/* Set IFS slot time */
648	ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
649
650	/* Set EIFS interval */
651	ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);
652
653	/* Set SIFS interval in usecs */
654	AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
655				AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
656				sifs);
657
658	/* Set SIFS interval in clock cycles */
659	ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);
660
661	return 0;
662}
663
664
665/**
666 * ath5k_hw_init_queues() - Initialize tx queues
667 * @ah: The &struct ath5k_hw
668 *
669 * Initializes all tx queues based on information on
670 * ah->ah_txq* set by the driver
671 */
672int
673ath5k_hw_init_queues(struct ath5k_hw *ah)
674{
675	int i, ret;
676
677	/* TODO: HW Compression support for data queues */
678	/* TODO: Burst prefetch for data queues */
679
680	/*
681	 * Reset queues and start beacon timers at the end of the reset routine
682	 * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
683	 * Note: If we want we can assign multiple qcus on one dcu.
684	 */
685	if (ah->ah_version != AR5K_AR5210)
686		for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
687			ret = ath5k_hw_reset_tx_queue(ah, i);
688			if (ret) {
689				ATH5K_ERR(ah,
690					"failed to reset TX queue #%d\n", i);
691				return ret;
692			}
693		}
694	else
695		/* No QCU/DCU on AR5210, just set tx
696		 * retry limits. We set IFS parameters
697		 * on ath5k_hw_set_ifs_intervals */
698		ath5k_hw_set_tx_retry_limits(ah, 0);
699
700	/* Set the turbo flag when operating on 40MHz */
701	if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
702		AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
703				AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
704
705	/* If we didn't set IFS timings through
706	 * ath5k_hw_set_coverage_class make sure
707	 * we set them here */
708	if (!ah->ah_coverage_class) {
709		unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
710		ath5k_hw_set_ifs_intervals(ah, slot_time);
711	}
712
713	return 0;
714}