// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright (C) 2019 Intel Corporation
 * Copyright (C) 2023 Intel Corporation
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-agn-hw.h"
#include "dev.h"
#include "agn.h"

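/*
 * Map the eight 802.11 TIDs (user priorities carried in the QoS control
 * field) to their access categories, following the standard UP-to-AC
 * mapping: TIDs 0 and 3 are best effort, 1 and 2 background, 4 and 5
 * video, 6 and 7 voice.
 */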
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};

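/*
 * Ask the device for RTS/CTS (or CTS-to-self) protection whenever
 * mac80211 requested it on the first TX rate, or when the frame is
 * part of an A-MPDU.
 */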
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
				     struct ieee80211_tx_info *info,
				     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
	    info->flags & IEEE80211_TX_CTL_AMPDU)
		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}

/*
 * Build the basic (non-rate, non-crypto) part of the REPLY_TX command.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == NL80211_BAND_2GHZ &&
		 priv->lib->bt_params &&
		 priv->lib->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		 ieee80211_is_reassoc_req(fc) ||
		 info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

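/*
 * Fill in the rate and antenna selection for the TX command.  Data
 * frames are left to the uCode's station rate-scaling table; anything
 * else goes out at a legacy rate derived from mac80211's first rate
 * entry, with the management antenna toggled between transmissions.
 */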
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     struct ieee80211_sta *sta,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	if (priv->wowlan) {
		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
	} else {
		/* Set retry limit on RTS packets */
		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

		/* Set retry limit on DATA packets and Probe Responses */
		if (ieee80211_is_probe_resp(fc)) {
			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
			rts_retry_limit =
				min(data_retry_limit, rts_retry_limit);
		} else if (ieee80211_is_back_req(fc))
			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
		else
			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	}

	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	} else if (ieee80211_is_back_req(fc))
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate.  Thus, we use the lowest supported rate for
	 * this band.  Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(
				&priv->nvm_data->bands[info->band], sta);
	/* For the 5 GHz band, remap mac80211 rate indices into driver
	 * indices: the driver's rate table lists the CCK rates first, so
	 * the OFDM-only 5 GHz indices are shifted up past them. */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->nvm_data->valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(
					priv, priv->mgmt_tx_ant,
					priv->nvm_data->valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

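/*
 * Program hardware encryption for this frame: copy the key material
 * for the negotiated cipher into the TX command.  TKIP needs the
 * per-packet phase-2 key, which is derived from the packet itself.
 */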
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		fallthrough;
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}

/**
 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
 * @context: the current context
 * @sta: mac80211 station
 *
 * In certain circumstances mac80211 passes a station pointer
 * that may be %NULL, for example during TX or key setup. In
 * that case, we need to use the broadcast station, so this
 * inline wraps that pattern.
 */
static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
				   struct ieee80211_sta *sta)
{
	int sta_id;

	if (!sta)
		return context->bcast_sta_id;

	sta_id = iwl_sta_id(sta);

	/*
	 * mac80211 should not be passing a partially
	 * initialised station!
	 */
	WARN_ON(sta_id == IWL_INVALID_STATION);

	return sta_id;
}

/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv,
		  struct ieee80211_sta *sta,
		  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_tx_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	bool is_agg = false, is_data_qos = false;
	int txq_id;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			skb_put_data(skb, noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use the broadcast id so as not to break
	 * aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 *
		 * FIXME: If we get two non-bufferable frames one
		 * after the other, we might only send out one of
		 * them because this is racy.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);

	if (unlikely(!dev_cmd))
		goto drop_unlock_priv;

	dev_cmd->hdr.cmd = REPLY_TX;
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);

	memset(&info->status, 0, sizeof(info->status));
	memset(info->driver_data, 0, sizeof(info->driver_data));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;
	/* From now on, we cannot access info->control */

	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv,
				"TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
				sta_id, tid,
				IEEE80211_SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d\n", tid_data->agg.state))
			goto drop_unlock_sta;

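		/*
		 * The driver keeps the per-TID sequence counter in on-air
		 * 802.11 format: the sequence number occupies bits 4-15 of
		 * seq_ctrl (IEEE80211_SCTL_SEQ), so stepping to the next
		 * MPDU means adding 0x10, while the low fragment bits are
		 * taken from the header itself.
		 */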
		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;

		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			is_agg = true;
		is_data_qos = true;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	txq_id = info->hw_queue;

	if (is_agg)
		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	}

	WARN_ON_ONCE(is_agg &&
		     priv->queue_to_mac80211[txq_id] != info->hw_queue);

	IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
		     txq_id, seq_number);

	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (is_data_qos && !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->sta_lock);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	return -1;
}

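/*
 * Reserve a hardware queue for a new aggregation session: scan the
 * AMPDU queue range for a free slot and bind it to the given mac80211
 * queue.  Returns the hardware queue number, or -ENOSPC when every
 * aggregation-capable queue is already in use.
 */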
static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
{
	int q;

	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
	     q < priv->trans->trans_cfg->base_params->num_of_queues; q++) {
		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
			priv->queue_to_mac80211[q] = mq;
			return q;
		}
	}

	return -ENOSPC;
}

static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
{
	clear_bit(q, priv->agg_q_alloc);
	priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
}

int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id;
	enum iwl_agg_state agg_state;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	switch (tid_data->agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_STARTING:
		/*
		 * This can happen when the session is stopped before
		 * we receive the ADDBA response
		 */
		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv,
			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			 sta_id, tid, tid_data->agg.state);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (!test_bit(txq_id, priv->agg_q_alloc)) {
		IWL_DEBUG_TX_QUEUES(priv,
			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
			sta_id, tid, txq_id);
	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv,
				    "Can't proceed: ssn %d, next_recl = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
			    tid_data->agg.ssn);
turn_off:
	agg_state = tid_data->agg.state;
	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * aggregation, don't tell it to stop.  This can happen
		 * when we don't get the addBA response on time, or we
		 * didn't have time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id, true);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}

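/*
 * Handle a TX aggregation start request from mac80211 (ADDBA).  A
 * hardware queue is reserved for the <sta, tid> pair; if the shared
 * AC queue has already drained up to the session's starting sequence
 * number the session can start immediately, otherwise we wait in
 * IWL_EMPTYING_HW_QUEUE_ADDBA until it has.
 */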
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id, ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
	if (txq_id < 0) {
		IWL_DEBUG_TX_QUEUES(priv,
			"No free aggregation queue for %pM/%d\n",
			sta->addr, tid);
		return txq_id;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_bh(&priv->sta_lock);
	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;

	*ssn = tid_data->agg.ssn;

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_bh(&priv->sta_lock);

	return ret;
}

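/*
 * Tear an aggregation session down without the usual drain sequence:
 * mark it off first (so the queue-empty logic won't notify mac80211),
 * flush the corresponding TX FIFO, then release the hardware queue.
 */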
int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	enum iwl_agg_state agg_state;
	int sta_id, txq_id;
	sta_id = iwl_sta_id(sta);

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty.
	 */
	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;
	agg_state = tid_data->agg.state;
	IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n",
			    sta_id, tid, txq_id, tid_data->agg.state);

	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (iwlagn_txfifo_flush(priv, BIT(txq_id)))
		IWL_ERR(priv, "Couldn't flush the AGG queue\n");

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * aggregation, don't tell it to stop.  This can happen
		 * when we don't get the addBA response on time, or we
		 * didn't have time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id, true);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	return 0;
}

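/*
 * The ADDBA handshake with the peer completed: program the hardware
 * queue with the negotiated reorder-buffer size and move the session
 * to IWL_AGG_ON.  The buffer size is also folded into the link quality
 * command, since the uCode only supports one limit per station.
 */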
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int q, fifo;
	u16 ssn;

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&priv->sta_lock);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
	spin_unlock_bh(&priv->sta_lock);

	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
			     buf_size, ssn, 0);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (priv->hw_params.use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the preferred protection
		 * method for HT traffic
		 */

		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			&sta_priv->lq_sta.lq, CMD_ASYNC, false);
}

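/*
 * Called whenever frames are reclaimed: if a DELBA or ADDBA transition
 * was waiting for the hardware queue to drain (ssn caught up with
 * next_reclaimed), complete it here and notify mac80211.
 */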
static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	u8 *addr;

	lockdep_assert_held(&priv->sta_lock);

	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			iwl_trans_txq_disable(priv->trans,
					      tid_data->agg.txq_id, true);
			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			tid_data->agg.state = IWL_AGG_STARTING;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}

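/*
 * Account a completed non-aggregation frame against the destination
 * station: once its pending-frames counter drops to zero, let mac80211
 * unblock the station.
 */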
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}

/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				  struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

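/*
 * Debug-only helper mapping a uCode TX status code to a printable name;
 * the TX_STATUS_FAIL/TX_STATUS_POSTPONE macros expand into the case
 * labels of the switch below.
 */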
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->reply_agg_tx_stats.unknown++;
		break;
	}
}

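/*
 * The scheduler sequence number (the next SSN after this batch) is
 * appended to the TX response right after the frame_count per-frame
 * status entries, so read the 32-bit word at that offset and mask it
 * down to a valid 12-bit sequence number.
 */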
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}

static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
				struct iwlagn_tx_resp *tx_resp)
{
	struct agg_tx_status *frame_status = &tx_resp->status;
	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;
	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int i;

	WARN_ON(tid == IWL_TID_NON_QOS);

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv,
			"got tx response w/o block-ack\n");

	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->wait_for_ba = (tx_resp->frame_count > 1);

	/*
	 * If the BT kill count is non-zero, we'll get this
	 * notification again.
	 */
	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
	    priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist) {
		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
	}

	if (tx_resp->frame_count == 1)
		return;

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
			   agg->txq_id,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);

	/* Construct bit-map of pending frames within Tx window */
	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);
		u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;

		if (status & AGG_TX_STATUS_MSK)
			iwlagn_count_agg_tx_err_status(priv, fstatus);

		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
			      AGG_TX_STATE_ABORT_MSK))
			continue;

		if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
			IWL_DEBUG_TX_REPLY(priv,
					   "%d: status %s (0x%04x), try-count (0x%01x)\n",
					   i,
					   iwl_get_agg_tx_fail_reason(fstatus),
					   fstatus & AGG_TX_STATUS_MSK,
					   retry_cnt);
	}
}

#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
		AGG_TX_STATE_FAIL(ABORT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
		AGG_TX_STATE_FAIL(RESPONSE_MSK);
		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->reply_tx_stats.unknown++;
		break;
	}
}

static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwlagn_tx_resp *tx_resp)
{
	u16 status = le16_to_cpu(tx_resp->status.status);

	info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);
}

static void iwl_check_abort_status(struct iwl_priv *priv,
			    u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}

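/*
 * Handle the REPLY_TX notification: reclaim the transmitted frames
 * from the transport, update sequence bookkeeping for the RA/TID,
 * translate the uCode status into mac80211 TX status, and hand the
 * skbs back to mac80211.
 */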
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_bh(&priv->sta_lock);

	if (is_agg) {
		WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
			     tid >= IWL_MAX_TID_COUNT);
		if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
			IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
				priv->tid_data[sta_id][tid].agg.txq_id);
		iwl_rx_reply_tx_agg(priv, tx_resp);
	}

	__skb_queue_head_init(&skbs);

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
			iwlagn_check_ratid_empty(priv, sta_id, tid);
		}

		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false);

		freed = 0;

		/* process frames */
		skb_queue_walk(&skbs, skb) {
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			iwl_trans_free_tx_cmd(priv->trans,
					      info->driver_data[1]);

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				/* block and stop all queues */
				priv->passive_no_rx = true;
				IWL_DEBUG_TX_QUEUES(priv,
					"stop all queues: passive channel\n");
				ieee80211_stop_queues(priv->hw);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
				     tx_resp);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			freed++;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		if (!is_agg && freed != 1)
			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);

		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
				   iwl_get_tx_fail_reason(status), status);

		IWL_DEBUG_TX_REPLY(priv,
				   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
				   le32_to_cpu(tx_resp->rate_n_flags),
				   tx_resp->failure_frame,
				   SEQ_TO_INDEX(sequence), ssn,
				   le16_to_cpu(tx_resp->seq_ctl));
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status_skb(priv->hw, skb);
	}
}

/*
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct sk_buff *skb;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->trans->trans_cfg->base_params->num_of_queues) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->tid_data[sta_id][tid].agg;

	spin_lock_bh(&priv->sta_lock);

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_bh(&priv->sta_lock);
		return;
	}

	if (unlikely(scd_flow != agg->txq_id)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can possibly happen very often, and in order
		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
		 */
		IWL_DEBUG_TX_QUEUES(priv,
				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
				    scd_flow, sta_id, tid, agg->txq_id);
		spin_unlock_bh(&priv->sta_lock);
		return;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
			  &reclaimed_skbs, false);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn, ba_resp->txed,
			   ba_resp->txed_2_done);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed_2_done = txed,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}

	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

	iwlagn_check_ratid_empty(priv, sta_id, tid);
	freed = 0;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}
	}

	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status_skb(priv->hw, skb);
	}
}
v4.6
 
   1/******************************************************************************
   2 *
   3 * GPL LICENSE SUMMARY
   4 *
   5 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of version 2 of the GNU General Public License as
   9 * published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but
  12 * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  19 * USA
  20 *
  21 * The full GNU General Public License is included in this distribution
  22 * in the file called COPYING.
  23 *
  24 * Contact Information:
  25 *  Intel Linux Wireless <linuxwifi@intel.com>
  26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  27 *
  28 *****************************************************************************/
  29
  30#include <linux/kernel.h>
  31#include <linux/module.h>
  32#include <linux/sched.h>
  33#include <linux/ieee80211.h>
  34#include "iwl-io.h"
  35#include "iwl-trans.h"
  36#include "iwl-agn-hw.h"
  37#include "dev.h"
  38#include "agn.h"
  39
  40static const u8 tid_to_ac[] = {
  41	IEEE80211_AC_BE,
  42	IEEE80211_AC_BK,
  43	IEEE80211_AC_BK,
  44	IEEE80211_AC_BE,
  45	IEEE80211_AC_VI,
  46	IEEE80211_AC_VI,
  47	IEEE80211_AC_VO,
  48	IEEE80211_AC_VO,
  49};
  50
  51static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
  52				     struct ieee80211_tx_info *info,
  53				     __le16 fc, __le32 *tx_flags)
  54{
  55	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
  56	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
  57	    info->flags & IEEE80211_TX_CTL_AMPDU)
  58		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
  59}
  60
  61/*
  62 * handle build REPLY_TX command notification.
  63 */
  64static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
  65				      struct sk_buff *skb,
  66				      struct iwl_tx_cmd *tx_cmd,
  67				      struct ieee80211_tx_info *info,
  68				      struct ieee80211_hdr *hdr, u8 sta_id)
  69{
  70	__le16 fc = hdr->frame_control;
  71	__le32 tx_flags = tx_cmd->tx_flags;
  72
  73	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
  74
  75	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
  76		tx_flags |= TX_CMD_FLG_ACK_MSK;
  77	else
  78		tx_flags &= ~TX_CMD_FLG_ACK_MSK;
  79
  80	if (ieee80211_is_probe_resp(fc))
  81		tx_flags |= TX_CMD_FLG_TSF_MSK;
  82	else if (ieee80211_is_back_req(fc))
  83		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
  84	else if (info->band == IEEE80211_BAND_2GHZ &&
  85		 priv->lib->bt_params &&
  86		 priv->lib->bt_params->advanced_bt_coexist &&
  87		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
  88		 ieee80211_is_reassoc_req(fc) ||
  89		 info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
  90		tx_flags |= TX_CMD_FLG_IGNORE_BT;
  91
  92
  93	tx_cmd->sta_id = sta_id;
  94	if (ieee80211_has_morefrags(fc))
  95		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
  96
  97	if (ieee80211_is_data_qos(fc)) {
  98		u8 *qc = ieee80211_get_qos_ctl(hdr);
  99		tx_cmd->tid_tspec = qc[0] & 0xf;
 100		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 101	} else {
 102		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
 103		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
 104			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 105		else
 106			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 107	}
 108
 109	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
 110
 111	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
 112	if (ieee80211_is_mgmt(fc)) {
 113		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
 114			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
 115		else
 116			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
 117	} else {
 118		tx_cmd->timeout.pm_frame_timeout = 0;
 119	}
 120
 121	tx_cmd->driver_txop = 0;
 122	tx_cmd->tx_flags = tx_flags;
 123	tx_cmd->next_frame_len = 0;
 124}
 125
 126static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
 127				     struct iwl_tx_cmd *tx_cmd,
 128				     struct ieee80211_tx_info *info,
 129				     struct ieee80211_sta *sta,
 130				     __le16 fc)
 131{
 132	u32 rate_flags;
 133	int rate_idx;
 134	u8 rts_retry_limit;
 135	u8 data_retry_limit;
 136	u8 rate_plcp;
 137
 138	if (priv->wowlan) {
 139		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
 140		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
 141	} else {
 142		/* Set retry limit on RTS packets */
 143		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;
 144
 145		/* Set retry limit on DATA packets and Probe Responses*/
 146		if (ieee80211_is_probe_resp(fc)) {
 147			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
 148			rts_retry_limit =
 149				min(data_retry_limit, rts_retry_limit);
 150		} else if (ieee80211_is_back_req(fc))
 151			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
 152		else
 153			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
 154	}
 155
 156	tx_cmd->data_retry_limit = data_retry_limit;
 157	tx_cmd->rts_retry_limit = rts_retry_limit;
 158
 159	/* DATA packets will use the uCode station table for rate/antenna
 160	 * selection */
 161	if (ieee80211_is_data(fc)) {
 162		tx_cmd->initial_rate_index = 0;
 163		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
 164		return;
 165	} else if (ieee80211_is_back_req(fc))
 166		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
 167
 168	/**
 169	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
 170	 * not really a TX rate.  Thus, we use the lowest supported rate for
 171	 * this band.  Also use the lowest supported rate if the stored rate
 172	 * index is invalid.
 173	 */
 174	rate_idx = info->control.rates[0].idx;
 175	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
 176			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
 177		rate_idx = rate_lowest_index(
 178				&priv->nvm_data->bands[info->band], sta);
 179	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
 180	if (info->band == IEEE80211_BAND_5GHZ)
 181		rate_idx += IWL_FIRST_OFDM_RATE;
 182	/* Get PLCP rate for tx_cmd->rate_n_flags */
 183	rate_plcp = iwl_rates[rate_idx].plcp;
 184	/* Zero out flags for this packet */
 185	rate_flags = 0;
 186
 187	/* Set CCK flag as needed */
 188	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
 189		rate_flags |= RATE_MCS_CCK_MSK;
 190
 191	/* Set up antennas */
 192	if (priv->lib->bt_params &&
 193	    priv->lib->bt_params->advanced_bt_coexist &&
 194	    priv->bt_full_concurrent) {
 195		/* operated as 1x1 in full concurrency mode */
 196		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
 197				first_antenna(priv->nvm_data->valid_tx_ant));
 198	} else
 199		priv->mgmt_tx_ant = iwl_toggle_tx_ant(
 200					priv, priv->mgmt_tx_ant,
 201					priv->nvm_data->valid_tx_ant);
 202	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 203
 204	/* Set the rate in the TX cmd */
 205	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
 206}
 207
 208static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
 209					 struct ieee80211_tx_info *info,
 210					 struct iwl_tx_cmd *tx_cmd,
 211					 struct sk_buff *skb_frag)
 212{
 213	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 214
 215	switch (keyconf->cipher) {
 216	case WLAN_CIPHER_SUITE_CCMP:
 217		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
 218		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
 219		if (info->flags & IEEE80211_TX_CTL_AMPDU)
 220			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
 221		break;
 222
 223	case WLAN_CIPHER_SUITE_TKIP:
 224		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
 225		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
 226		break;
 227
 228	case WLAN_CIPHER_SUITE_WEP104:
 229		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
 230		/* fall through */
 231	case WLAN_CIPHER_SUITE_WEP40:
 232		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
 233			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
 234
 235		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
 236
 237		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
 238			     "with key %d\n", keyconf->keyidx);
 239		break;
 240
 241	default:
 242		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
 243		break;
 244	}
 245}
 246
 247/**
 248 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
 249 * @context: the current context
 250 * @sta: mac80211 station
 251 *
 252 * In certain circumstances mac80211 passes a station pointer
 253 * that may be %NULL, for example during TX or key setup. In
 254 * that case, we need to use the broadcast station, so this
 255 * inline wraps that pattern.
 256 */
 257static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
 258				   struct ieee80211_sta *sta)
 259{
 260	int sta_id;
 261
 262	if (!sta)
 263		return context->bcast_sta_id;
 264
 265	sta_id = iwl_sta_id(sta);
 266
 267	/*
 268	 * mac80211 should not be passing a partially
 269	 * initialised station!
 270	 */
 271	WARN_ON(sta_id == IWL_INVALID_STATION);
 272
 273	return sta_id;
 274}
 275
 276/*
 277 * start REPLY_TX command process
 278 */
 279int iwlagn_tx_skb(struct iwl_priv *priv,
 280		  struct ieee80211_sta *sta,
 281		  struct sk_buff *skb)
 282{
 283	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 284	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 285	struct iwl_station_priv *sta_priv = NULL;
 286	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 287	struct iwl_device_cmd *dev_cmd;
 288	struct iwl_tx_cmd *tx_cmd;
 289	__le16 fc;
 290	u8 hdr_len;
 291	u16 len, seq_number = 0;
 292	u8 sta_id, tid = IWL_MAX_TID_COUNT;
 293	bool is_agg = false, is_data_qos = false;
 294	int txq_id;
 295
 296	if (info->control.vif)
 297		ctx = iwl_rxon_ctx_from_vif(info->control.vif);
 298
 299	if (iwl_is_rfkill(priv)) {
 300		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
 301		goto drop_unlock_priv;
 302	}
 303
 304	fc = hdr->frame_control;
 305
 306#ifdef CONFIG_IWLWIFI_DEBUG
 307	if (ieee80211_is_auth(fc))
 308		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
 309	else if (ieee80211_is_assoc_req(fc))
 310		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
 311	else if (ieee80211_is_reassoc_req(fc))
 312		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
 313#endif
 314
 315	if (unlikely(ieee80211_is_probe_resp(fc))) {
 316		struct iwl_wipan_noa_data *noa_data =
 317			rcu_dereference(priv->noa_data);
 318
 319		if (noa_data &&
 320		    pskb_expand_head(skb, 0, noa_data->length,
 321				     GFP_ATOMIC) == 0) {
 322			memcpy(skb_put(skb, noa_data->length),
 323			       noa_data->data, noa_data->length);
 324			hdr = (struct ieee80211_hdr *)skb->data;
 325		}
 326	}
 327
 328	hdr_len = ieee80211_hdrlen(fc);
 329
  330	/* For management frames use broadcast id so as not to break aggregation */
 331	if (!ieee80211_is_data(fc))
 332		sta_id = ctx->bcast_sta_id;
 333	else {
 334		/* Find index into station table for destination station */
 335		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
 336		if (sta_id == IWL_INVALID_STATION) {
 337			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
 338				       hdr->addr1);
 339			goto drop_unlock_priv;
 340		}
 341	}
 342
 343	if (sta)
 344		sta_priv = (void *)sta->drv_priv;
 345
 346	if (sta_priv && sta_priv->asleep &&
 347	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
 348		/*
 349		 * This sends an asynchronous command to the device,
 350		 * but we can rely on it being processed before the
 351		 * next frame is processed -- and the next frame to
 352		 * this station is the one that will consume this
 353		 * counter.
 354		 * For now set the counter to just 1 since we do not
 355		 * support uAPSD yet.
 356		 *
 357		 * FIXME: If we get two non-bufferable frames one
 358		 * after the other, we might only send out one of
 359		 * them because this is racy.
 360		 */
 361		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
 362	}
 363
 364	dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
 365
 366	if (unlikely(!dev_cmd))
 367		goto drop_unlock_priv;
 368
 369	memset(dev_cmd, 0, sizeof(*dev_cmd));
 370	dev_cmd->hdr.cmd = REPLY_TX;
 371	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
 372
 373	/* Total # bytes to be transmitted */
 374	len = (u16)skb->len;
 375	tx_cmd->len = cpu_to_le16(len);
 376
 377	if (info->control.hw_key)
 378		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);
 379
 380	/* TODO need this for burst mode later on */
 381	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
 382
 383	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
 384
 385	memset(&info->status, 0, sizeof(info->status));
 386	memset(info->driver_data, 0, sizeof(info->driver_data));
 387
 388	info->driver_data[0] = ctx;
 389	info->driver_data[1] = dev_cmd;
 390	/* From now on, we cannot access info->control */
 391
 392	spin_lock(&priv->sta_lock);
 393
 394	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
 395		u8 *qc = NULL;
 396		struct iwl_tid_data *tid_data;
 397		qc = ieee80211_get_qos_ctl(hdr);
 398		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
 399		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
 400			goto drop_unlock_sta;
 401		tid_data = &priv->tid_data[sta_id][tid];
 402
 403		/* aggregation is on for this <sta,tid> */
 404		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
 405		    tid_data->agg.state != IWL_AGG_ON) {
 406			IWL_ERR(priv,
 407				"TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
 408				info->flags, tid_data->agg.state);
 409			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
 410				sta_id, tid,
 411				IEEE80211_SEQ_TO_SN(tid_data->seq_number));
 412			goto drop_unlock_sta;
 413		}
 414
 415		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
 416		 * only. Check this here.
 417		 */
 418		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
 419			      tid_data->agg.state != IWL_AGG_OFF,
 420			      "Tx while agg.state = %d\n", tid_data->agg.state))
 421			goto drop_unlock_sta;
 422
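     		/*
     		 * Assign the driver-maintained sequence number: the uCode
     		 * indexes the aggregation TFD ring by sequence number, so
     		 * the SN in the header must stay in sync with our counter.
     		 * 0x10 is one SN step, since the low 4 bits of seq_ctrl
     		 * hold the fragment number.
     		 */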
 423		seq_number = tid_data->seq_number;
 424		seq_number &= IEEE80211_SCTL_SEQ;
 425		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 426		hdr->seq_ctrl |= cpu_to_le16(seq_number);
 427		seq_number += 0x10;
 428
 429		if (info->flags & IEEE80211_TX_CTL_AMPDU)
 430			is_agg = true;
 431		is_data_qos = true;
 432	}
 433
 434	/* Copy MAC header from skb into command buffer */
 435	memcpy(tx_cmd->hdr, hdr, hdr_len);
 436
 437	txq_id = info->hw_queue;
 438
 439	if (is_agg)
 440		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
 441	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
 442		/*
 443		 * The microcode will clear the more data
 444		 * bit in the last frame it transmits.
 445		 */
 446		hdr->frame_control |=
 447			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
 448	}
 449
 450	WARN_ON_ONCE(is_agg &&
 451		     priv->queue_to_mac80211[txq_id] != info->hw_queue);
 452
 453	IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
 454		     txq_id, seq_number);
 455
 456	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
 457		goto drop_unlock_sta;
 458
 459	if (is_data_qos && !ieee80211_has_morefrags(fc))
 460		priv->tid_data[sta_id][tid].seq_number = seq_number;
 461
 462	spin_unlock(&priv->sta_lock);
 463
 464	/*
 465	 * Avoid atomic ops if it isn't an associated client.
 466	 * Also, if this is a packet for aggregation, don't
 467	 * increase the counter because the ucode will stop
 468	 * aggregation queues when their respective station
 469	 * goes to sleep.
 470	 */
 471	if (sta_priv && sta_priv->client && !is_agg)
 472		atomic_inc(&sta_priv->pending_frames);
 473
 474	return 0;
 475
 476drop_unlock_sta:
 477	if (dev_cmd)
 478		iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
 479	spin_unlock(&priv->sta_lock);
 480drop_unlock_priv:
 481	return -1;
 482}
 483
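     /*
      * Reserve a free hardware aggregation queue, starting from
      * IWLAGN_FIRST_AMPDU_QUEUE, and record which mac80211 queue it
      * serves. Returns the queue number, or -ENOSPC if all aggregation
      * queues are already in use.
      */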
 484static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
 485{
 486	int q;
 487
 488	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
 489	     q < priv->cfg->base_params->num_of_queues; q++) {
 490		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
 491			priv->queue_to_mac80211[q] = mq;
 492			return q;
 493		}
 494	}
 495
 496	return -ENOSPC;
 497}
 498
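     /* Return an aggregation queue to the pool and unmap it. */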
 499static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
 500{
 501	clear_bit(q, priv->agg_q_alloc);
 502	priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
 503}
 504
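     /*
      * Tear down an aggregation session. If frames for this RA/TID are
      * still pending in the hardware queue, park the state machine in
      * IWL_EMPTYING_HW_QUEUE_DELBA and let iwlagn_check_ratid_empty()
      * finish the teardown once the queue has drained.
      */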
 505int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 506			struct ieee80211_sta *sta, u16 tid)
 507{
 508	struct iwl_tid_data *tid_data;
 509	int sta_id, txq_id;
 510	enum iwl_agg_state agg_state;
 511
 512	sta_id = iwl_sta_id(sta);
 513
 514	if (sta_id == IWL_INVALID_STATION) {
 515		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
 516		return -ENXIO;
 517	}
 518
 519	spin_lock_bh(&priv->sta_lock);
 520
 521	tid_data = &priv->tid_data[sta_id][tid];
 522	txq_id = tid_data->agg.txq_id;
 523
 524	switch (tid_data->agg.state) {
 525	case IWL_EMPTYING_HW_QUEUE_ADDBA:
 526		/*
  527		 * This can happen if the peer stops aggregation
  528		 * again before we've had a chance to drain the
  529		 * queue we selected previously, i.e. before the
  530		 * session was really started completely.
  531		 */
 532		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
 533		goto turn_off;
 534	case IWL_AGG_STARTING:
 535		/*
 536		 * This can happen when the session is stopped before
  537		 * we receive the ADDBA response
 538		 */
 539		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
 540		goto turn_off;
 541	case IWL_AGG_ON:
 542		break;
 543	default:
 544		IWL_WARN(priv,
 545			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
 546			 sta_id, tid, tid_data->agg.state);
 547		spin_unlock_bh(&priv->sta_lock);
 548		return 0;
 549	}
 550
 551	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 552
 553	/* There are still packets for this RA / TID in the HW */
 554	if (!test_bit(txq_id, priv->agg_q_alloc)) {
 555		IWL_DEBUG_TX_QUEUES(priv,
 556			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
 557			sta_id, tid, txq_id);
 558	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
 559		IWL_DEBUG_TX_QUEUES(priv,
 560				    "Can't proceed: ssn %d, next_recl = %d\n",
 561				    tid_data->agg.ssn,
 562				    tid_data->next_reclaimed);
 563		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
 564		spin_unlock_bh(&priv->sta_lock);
 565		return 0;
 566	}
 567
 568	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
 569			    tid_data->agg.ssn);
 570turn_off:
 571	agg_state = tid_data->agg.state;
 572	tid_data->agg.state = IWL_AGG_OFF;
 573
 574	spin_unlock_bh(&priv->sta_lock);
 575
 576	if (test_bit(txq_id, priv->agg_q_alloc)) {
 577		/*
  578		 * If the transport didn't know that we wanted to start
  579		 * aggregation, don't tell it that we want to stop it.
  580		 * This can happen when we don't get the addBA response on
  581		 * time, or didn't have time to drain the AC queues.
 582		 */
 583		if (agg_state == IWL_AGG_ON)
 584			iwl_trans_txq_disable(priv->trans, txq_id, true);
 585		else
 586			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
 587					    agg_state);
 588		iwlagn_dealloc_agg_txq(priv, txq_id);
 589	}
 590
 591	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 592
 593	return 0;
 594}
 595
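     /*
      * Start a TX aggregation session: allocate a hardware queue for
      * the <sta, tid> pair, enable the TID in the station table and
      * record the starting SSN. The ADDBA flow continues immediately
      * only if every frame sent before the session start has already
      * been reclaimed; otherwise it is resumed from
      * iwlagn_check_ratid_empty().
      */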
 596int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 597			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 598{
 599	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 600	struct iwl_tid_data *tid_data;
 601	int sta_id, txq_id, ret;
 602
 603	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
 604		     sta->addr, tid);
 605
 606	sta_id = iwl_sta_id(sta);
 607	if (sta_id == IWL_INVALID_STATION) {
 608		IWL_ERR(priv, "Start AGG on invalid station\n");
 609		return -ENXIO;
 610	}
 611	if (unlikely(tid >= IWL_MAX_TID_COUNT))
 612		return -EINVAL;
 613
 614	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
 615		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
 616		return -ENXIO;
 617	}
 618
 619	txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
 620	if (txq_id < 0) {
 621		IWL_DEBUG_TX_QUEUES(priv,
 622			"No free aggregation queue for %pM/%d\n",
 623			sta->addr, tid);
 624		return txq_id;
 625	}
 626
 627	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
 628	if (ret)
 629		return ret;
 630
 631	spin_lock_bh(&priv->sta_lock);
 632	tid_data = &priv->tid_data[sta_id][tid];
 633	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 634	tid_data->agg.txq_id = txq_id;
 635
 636	*ssn = tid_data->agg.ssn;
 637
 638	if (*ssn == tid_data->next_reclaimed) {
 639		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
 640				    tid_data->agg.ssn);
 641		tid_data->agg.state = IWL_AGG_STARTING;
 642		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 643	} else {
 644		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
 645				    "next_reclaimed = %d\n",
 646				    tid_data->agg.ssn,
 647				    tid_data->next_reclaimed);
 648		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
 649	}
 650	spin_unlock_bh(&priv->sta_lock);
 651
 652	return ret;
 653}
 654
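     /*
      * Flush and tear down an aggregation session without waiting for
      * the hardware queue to drain, then release the queue.
      */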
 655int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
 656			struct ieee80211_sta *sta, u16 tid)
 657{
 658	struct iwl_tid_data *tid_data;
 659	enum iwl_agg_state agg_state;
 660	int sta_id, txq_id;
 661	sta_id = iwl_sta_id(sta);
 662
 663	/*
 664	 * First set the agg state to OFF to avoid calling
 665	 * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty.
 666	 */
 667	spin_lock_bh(&priv->sta_lock);
 668
 669	tid_data = &priv->tid_data[sta_id][tid];
 670	txq_id = tid_data->agg.txq_id;
 671	agg_state = tid_data->agg.state;
 672	IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n",
 673			    sta_id, tid, txq_id, tid_data->agg.state);
 674
 675	tid_data->agg.state = IWL_AGG_OFF;
 676
 677	spin_unlock_bh(&priv->sta_lock);
 678
 679	if (iwlagn_txfifo_flush(priv, BIT(txq_id)))
 680		IWL_ERR(priv, "Couldn't flush the AGG queue\n");
 681
 682	if (test_bit(txq_id, priv->agg_q_alloc)) {
 683		/*
  684		 * If the transport didn't know that we wanted to start
  685		 * aggregation, don't tell it that we want to stop it.
  686		 * This can happen when we don't get the addBA response on
  687		 * time, or didn't have time to drain the AC queues.
 688		 */
 689		if (agg_state == IWL_AGG_ON)
 690			iwl_trans_txq_disable(priv->trans, txq_id, true);
 691		else
 692			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
 693					    agg_state);
 694		iwlagn_dealloc_agg_txq(priv, txq_id);
 695	}
 696
 697	return 0;
 698}
 699
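     /*
      * Finalize aggregation setup once the ADDBA handshake has
      * completed: clamp the peer's reorder buffer size, enable the
      * hardware queue on the right FIFO and push updated link quality
      * (rate scaling) parameters to the uCode.
      */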
 700int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
 701			struct ieee80211_sta *sta, u16 tid, u8 buf_size)
 702{
 703	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
 704	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 705	int q, fifo;
 706	u16 ssn;
 707
 708	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
 709
 710	spin_lock_bh(&priv->sta_lock);
 711	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
 712	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
 713	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
 714	spin_unlock_bh(&priv->sta_lock);
 715
 716	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
 717
 718	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
 719			     buf_size, ssn, 0);
 720
 721	/*
 722	 * If the limit is 0, then it wasn't initialised yet;
 723	 * use the default. We can do that since we take the
 724	 * minimum below, and we don't want to go above our
 725	 * default due to hardware restrictions.
 726	 */
 727	if (sta_priv->max_agg_bufsize == 0)
 728		sta_priv->max_agg_bufsize =
 729			LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 730
 731	/*
 732	 * Even though in theory the peer could have different
 733	 * aggregation reorder buffer sizes for different sessions,
 734	 * our ucode doesn't allow for that and has a global limit
 735	 * for each station. Therefore, use the minimum of all the
 736	 * aggregation sessions and our default value.
 737	 */
 738	sta_priv->max_agg_bufsize =
 739		min(sta_priv->max_agg_bufsize, buf_size);
 740
 741	if (priv->hw_params.use_rts_for_aggregation) {
 742		/*
 743		 * switch to RTS/CTS if it is the preferred protection
 744		 * method for HT traffic
 745		 */
 746
 747		sta_priv->lq_sta.lq.general_params.flags |=
 748			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
 749	}
 750	priv->agg_tids_count++;
 751	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
 752		     priv->agg_tids_count);
 753
 754	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
 755		sta_priv->max_agg_bufsize;
 756
 757	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
 758		     sta->addr, tid);
 759
 760	return iwl_send_lq_cmd(priv, ctx,
 761			&sta_priv->lq_sta.lq, CMD_ASYNC, false);
 762}
 763
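     /*
      * Called with the station lock held whenever frames for this
      * RA/TID are reclaimed: if a pending DELBA or ADDBA flow was
      * waiting for the hardware queue to drain (ssn == next_reclaimed),
      * advance the aggregation state machine and notify mac80211.
      */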
 764static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
 765{
 766	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
 767	enum iwl_rxon_context_id ctx;
 768	struct ieee80211_vif *vif;
 769	u8 *addr;
 770
 771	lockdep_assert_held(&priv->sta_lock);
 772
 773	addr = priv->stations[sta_id].sta.sta.addr;
 774	ctx = priv->stations[sta_id].ctxid;
 775	vif = priv->contexts[ctx].vif;
 776
 777	switch (priv->tid_data[sta_id][tid].agg.state) {
 778	case IWL_EMPTYING_HW_QUEUE_DELBA:
 779		/* There are no packets for this RA / TID in the HW any more */
 780		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
 781			IWL_DEBUG_TX_QUEUES(priv,
 782				"Can continue DELBA flow ssn = next_recl = %d\n",
 783				tid_data->next_reclaimed);
 784			iwl_trans_txq_disable(priv->trans,
 785					      tid_data->agg.txq_id, true);
 786			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
 787			tid_data->agg.state = IWL_AGG_OFF;
 788			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
 789		}
 790		break;
 791	case IWL_EMPTYING_HW_QUEUE_ADDBA:
 792		/* There are no packets for this RA / TID in the HW any more */
 793		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
 794			IWL_DEBUG_TX_QUEUES(priv,
 795				"Can continue ADDBA flow ssn = next_recl = %d\n",
 796				tid_data->next_reclaimed);
 797			tid_data->agg.state = IWL_AGG_STARTING;
 798			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
 799		}
 800		break;
 801	default:
 802		break;
 803	}
 804}
 805
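     /*
      * Drop the pending-frames reference taken in iwlagn_tx_skb() for a
      * non-aggregated frame; when the count reaches zero the station
      * can be unblocked again.
      */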
 806static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
 807				     struct iwl_rxon_context *ctx,
 808				     const u8 *addr1)
 809{
 810	struct ieee80211_sta *sta;
 811	struct iwl_station_priv *sta_priv;
 812
 813	rcu_read_lock();
 814	sta = ieee80211_find_sta(ctx->vif, addr1);
 815	if (sta) {
 816		sta_priv = (void *)sta->drv_priv;
 817		/* avoid atomic ops if this isn't a client */
 818		if (sta_priv->client &&
 819		    atomic_dec_return(&sta_priv->pending_frames) == 0)
 820			ieee80211_sta_block_awake(priv->hw, sta, false);
 821	}
 822	rcu_read_unlock();
 823}
 824
  825/*
  826 * iwlagn_hwrate_to_tx_control - translate ucode response to
  827 * mac80211 tx status control values
      */
 828static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
 829				  struct ieee80211_tx_info *info)
 830{
 831	struct ieee80211_tx_rate *r = &info->status.rates[0];
 832
 833	info->status.antenna =
 834		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
 835	if (rate_n_flags & RATE_MCS_HT_MSK)
 836		r->flags |= IEEE80211_TX_RC_MCS;
 837	if (rate_n_flags & RATE_MCS_GF_MSK)
 838		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
 839	if (rate_n_flags & RATE_MCS_HT40_MSK)
 840		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 841	if (rate_n_flags & RATE_MCS_DUP_MSK)
 842		r->flags |= IEEE80211_TX_RC_DUP_DATA;
 843	if (rate_n_flags & RATE_MCS_SGI_MSK)
 844		r->flags |= IEEE80211_TX_RC_SHORT_GI;
 845	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
 846}
 847
 848#ifdef CONFIG_IWLWIFI_DEBUG
 849const char *iwl_get_tx_fail_reason(u32 status)
 850{
 851#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
 852#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
 853
 854	switch (status & TX_STATUS_MSK) {
 855	case TX_STATUS_SUCCESS:
 856		return "SUCCESS";
 857	TX_STATUS_POSTPONE(DELAY);
 858	TX_STATUS_POSTPONE(FEW_BYTES);
 859	TX_STATUS_POSTPONE(BT_PRIO);
 860	TX_STATUS_POSTPONE(QUIET_PERIOD);
 861	TX_STATUS_POSTPONE(CALC_TTAK);
 862	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
 863	TX_STATUS_FAIL(SHORT_LIMIT);
 864	TX_STATUS_FAIL(LONG_LIMIT);
 865	TX_STATUS_FAIL(FIFO_UNDERRUN);
 866	TX_STATUS_FAIL(DRAIN_FLOW);
 867	TX_STATUS_FAIL(RFKILL_FLUSH);
 868	TX_STATUS_FAIL(LIFE_EXPIRE);
 869	TX_STATUS_FAIL(DEST_PS);
 870	TX_STATUS_FAIL(HOST_ABORTED);
 871	TX_STATUS_FAIL(BT_RETRY);
 872	TX_STATUS_FAIL(STA_INVALID);
 873	TX_STATUS_FAIL(FRAG_DROPPED);
 874	TX_STATUS_FAIL(TID_DISABLE);
 875	TX_STATUS_FAIL(FIFO_FLUSHED);
 876	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
 877	TX_STATUS_FAIL(PASSIVE_NO_RX);
 878	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
 879	}
 880
 881	return "UNKNOWN";
 882
 883#undef TX_STATUS_FAIL
 884#undef TX_STATUS_POSTPONE
 885}
 886#endif /* CONFIG_IWLWIFI_DEBUG */
 887
 888static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
 889{
 890	status &= AGG_TX_STATUS_MSK;
 891
 892	switch (status) {
 893	case AGG_TX_STATE_UNDERRUN_MSK:
 894		priv->reply_agg_tx_stats.underrun++;
 895		break;
 896	case AGG_TX_STATE_BT_PRIO_MSK:
 897		priv->reply_agg_tx_stats.bt_prio++;
 898		break;
 899	case AGG_TX_STATE_FEW_BYTES_MSK:
 900		priv->reply_agg_tx_stats.few_bytes++;
 901		break;
 902	case AGG_TX_STATE_ABORT_MSK:
 903		priv->reply_agg_tx_stats.abort++;
 904		break;
 905	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
 906		priv->reply_agg_tx_stats.last_sent_ttl++;
 907		break;
 908	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
 909		priv->reply_agg_tx_stats.last_sent_try++;
 910		break;
 911	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
 912		priv->reply_agg_tx_stats.last_sent_bt_kill++;
 913		break;
 914	case AGG_TX_STATE_SCD_QUERY_MSK:
 915		priv->reply_agg_tx_stats.scd_query++;
 916		break;
 917	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
 918		priv->reply_agg_tx_stats.bad_crc32++;
 919		break;
 920	case AGG_TX_STATE_RESPONSE_MSK:
 921		priv->reply_agg_tx_stats.response++;
 922		break;
 923	case AGG_TX_STATE_DUMP_TX_MSK:
 924		priv->reply_agg_tx_stats.dump_tx++;
 925		break;
 926	case AGG_TX_STATE_DELAY_TX_MSK:
 927		priv->reply_agg_tx_stats.delay_tx++;
 928		break;
 929	default:
 930		priv->reply_agg_tx_stats.unknown++;
 931		break;
 932	}
 933}
 934
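     /*
      * The scheduler SSN follows the frame_count agg_tx_status entries
      * in the response; it is the sequence number up to which the queue
      * can be reclaimed.
      */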
 935static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
 936{
 937	return le32_to_cpup((__le32 *)&tx_resp->status +
 938			    tx_resp->frame_count) & IEEE80211_MAX_SN;
 939}
 940
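     /*
      * Handle a TX response for a frame sent on an aggregation queue:
      * update the block-ack bookkeeping and log the per-frame status
      * of multi-frame attempts.
      */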
 941static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
 942				struct iwlagn_tx_resp *tx_resp)
 943{
 944	struct agg_tx_status *frame_status = &tx_resp->status;
 945	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
 946		IWLAGN_TX_RES_TID_POS;
 947	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
 948		IWLAGN_TX_RES_RA_POS;
 949	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
 950	u32 status = le16_to_cpu(tx_resp->status.status);
 951	int i;
 952
 953	WARN_ON(tid == IWL_TID_NON_QOS);
 954
 955	if (agg->wait_for_ba)
 956		IWL_DEBUG_TX_REPLY(priv,
 957			"got tx response w/o block-ack\n");
 958
 959	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
 960	agg->wait_for_ba = (tx_resp->frame_count > 1);
 961
 962	/*
 963	 * If the BT kill count is non-zero, we'll get this
 964	 * notification again.
 965	 */
 966	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
 967	    priv->lib->bt_params &&
 968	    priv->lib->bt_params->advanced_bt_coexist) {
 969		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
 970	}
 971
 972	if (tx_resp->frame_count == 1)
 973		return;
 974
 975	IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
 976			   agg->txq_id,
 977			   le32_to_cpu(tx_resp->rate_n_flags),
 978			   iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);
 979
 980	/* Construct bit-map of pending frames within Tx window */
 981	for (i = 0; i < tx_resp->frame_count; i++) {
 982		u16 fstatus = le16_to_cpu(frame_status[i].status);
 983		u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;
 984
 985		if (status & AGG_TX_STATUS_MSK)
 986			iwlagn_count_agg_tx_err_status(priv, fstatus);
 987
 988		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
 989			      AGG_TX_STATE_ABORT_MSK))
 990			continue;
 991
 992		if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
 993			IWL_DEBUG_TX_REPLY(priv,
 994					   "%d: status %s (0x%04x), try-count (0x%01x)\n",
 995					   i,
 996					   iwl_get_agg_tx_fail_reason(fstatus),
 997					   fstatus & AGG_TX_STATUS_MSK,
 998					   retry_cnt);
 999	}
1000}
1001
1002#ifdef CONFIG_IWLWIFI_DEBUG
1003#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
1004
1005const char *iwl_get_agg_tx_fail_reason(u16 status)
1006{
1007	status &= AGG_TX_STATUS_MSK;
1008	switch (status) {
1009	case AGG_TX_STATE_TRANSMITTED:
1010		return "SUCCESS";
1011		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
1012		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
1013		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
1014		AGG_TX_STATE_FAIL(ABORT_MSK);
1015		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
1016		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
1017		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
1018		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
1019		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
1020		AGG_TX_STATE_FAIL(RESPONSE_MSK);
1021		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
1022		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
1023	}
1024
1025	return "UNKNOWN";
1026}
1027#endif /* CONFIG_IWLWIFI_DEBUG */
1028
1029static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
1030{
1031	status &= TX_STATUS_MSK;
1032
1033	switch (status) {
1034	case TX_STATUS_POSTPONE_DELAY:
1035		priv->reply_tx_stats.pp_delay++;
1036		break;
1037	case TX_STATUS_POSTPONE_FEW_BYTES:
1038		priv->reply_tx_stats.pp_few_bytes++;
1039		break;
1040	case TX_STATUS_POSTPONE_BT_PRIO:
1041		priv->reply_tx_stats.pp_bt_prio++;
1042		break;
1043	case TX_STATUS_POSTPONE_QUIET_PERIOD:
1044		priv->reply_tx_stats.pp_quiet_period++;
1045		break;
1046	case TX_STATUS_POSTPONE_CALC_TTAK:
1047		priv->reply_tx_stats.pp_calc_ttak++;
1048		break;
1049	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
1050		priv->reply_tx_stats.int_crossed_retry++;
1051		break;
1052	case TX_STATUS_FAIL_SHORT_LIMIT:
1053		priv->reply_tx_stats.short_limit++;
1054		break;
1055	case TX_STATUS_FAIL_LONG_LIMIT:
1056		priv->reply_tx_stats.long_limit++;
1057		break;
1058	case TX_STATUS_FAIL_FIFO_UNDERRUN:
1059		priv->reply_tx_stats.fifo_underrun++;
1060		break;
1061	case TX_STATUS_FAIL_DRAIN_FLOW:
1062		priv->reply_tx_stats.drain_flow++;
1063		break;
1064	case TX_STATUS_FAIL_RFKILL_FLUSH:
1065		priv->reply_tx_stats.rfkill_flush++;
1066		break;
1067	case TX_STATUS_FAIL_LIFE_EXPIRE:
1068		priv->reply_tx_stats.life_expire++;
1069		break;
1070	case TX_STATUS_FAIL_DEST_PS:
1071		priv->reply_tx_stats.dest_ps++;
1072		break;
1073	case TX_STATUS_FAIL_HOST_ABORTED:
1074		priv->reply_tx_stats.host_abort++;
1075		break;
1076	case TX_STATUS_FAIL_BT_RETRY:
1077		priv->reply_tx_stats.bt_retry++;
1078		break;
1079	case TX_STATUS_FAIL_STA_INVALID:
1080		priv->reply_tx_stats.sta_invalid++;
1081		break;
1082	case TX_STATUS_FAIL_FRAG_DROPPED:
1083		priv->reply_tx_stats.frag_drop++;
1084		break;
1085	case TX_STATUS_FAIL_TID_DISABLE:
1086		priv->reply_tx_stats.tid_disable++;
1087		break;
1088	case TX_STATUS_FAIL_FIFO_FLUSHED:
1089		priv->reply_tx_stats.fifo_flush++;
1090		break;
1091	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
1092		priv->reply_tx_stats.insuff_cf_poll++;
1093		break;
1094	case TX_STATUS_FAIL_PASSIVE_NO_RX:
1095		priv->reply_tx_stats.fail_hw_drop++;
1096		break;
1097	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
1098		priv->reply_tx_stats.sta_color_mismatch++;
1099		break;
1100	default:
1101		priv->reply_tx_stats.unknown++;
1102		break;
1103	}
1104}
1105
1106static void iwlagn_set_tx_status(struct iwl_priv *priv,
1107				 struct ieee80211_tx_info *info,
1108				 struct iwlagn_tx_resp *tx_resp)
1109{
1110	u16 status = le16_to_cpu(tx_resp->status.status);
1111
1112	info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1113
1114	info->status.rates[0].count = tx_resp->failure_frame + 1;
1115	info->flags |= iwl_tx_status_to_mac80211(status);
1116	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
1117				    info);
1118	if (!iwl_is_tx_success(status))
1119		iwlagn_count_tx_err_status(priv, status);
1120}
1121
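     /*
      * When a single frame fails with RFKILL_FLUSH, schedule the
      * tx_flush work to flush out all remaining frames (unless we are
      * already going down).
      */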
1122static void iwl_check_abort_status(struct iwl_priv *priv,
1123			    u8 frame_count, u32 status)
1124{
1125	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
1126		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
1127		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
1128			queue_work(priv->workqueue, &priv->tx_flush);
1129	}
1130}
1131
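     /*
      * Handler for REPLY_TX: reclaim the transmitted frames from the
      * transport layer, translate the uCode status into mac80211 TX
      * status and hand the skbs back to mac80211 once the station lock
      * has been dropped.
      */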
1132void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
1133{
1134	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1135	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1136	int txq_id = SEQ_TO_QUEUE(sequence);
1137	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
1138	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
1139	struct ieee80211_hdr *hdr;
1140	u32 status = le16_to_cpu(tx_resp->status.status);
1141	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
1142	int tid;
1143	int sta_id;
1144	int freed;
1145	struct ieee80211_tx_info *info;
1146	struct sk_buff_head skbs;
1147	struct sk_buff *skb;
1148	struct iwl_rxon_context *ctx;
1149	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
1150
1151	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
1152		IWLAGN_TX_RES_TID_POS;
1153	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
1154		IWLAGN_TX_RES_RA_POS;
1155
1156	spin_lock_bh(&priv->sta_lock);
1157
1158	if (is_agg) {
1159		WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
1160			     tid >= IWL_MAX_TID_COUNT);
1161		if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
1162			IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
1163				priv->tid_data[sta_id][tid].agg.txq_id);
1164		iwl_rx_reply_tx_agg(priv, tx_resp);
1165	}
1166
1167	__skb_queue_head_init(&skbs);
1168
1169	if (tx_resp->frame_count == 1) {
1170		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
1171		next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
1172
1173		if (is_agg) {
1174			/* If this is an aggregation queue, we can rely on the
1175			 * ssn since the wifi sequence number corresponds to
1176			 * the index in the TFD ring (%256).
1177			 * The seq_ctl is the sequence control of the packet
1178			 * to which this Tx response relates. But if there is a
1179			 * hole in the bitmap of the BA we received, this Tx
 1180			 * response may allow us to reclaim the hole and all the
1181			 * subsequent packets that were already acked.
1182			 * In that case, seq_ctl != ssn, and the next packet
1183			 * to be reclaimed will be ssn and not seq_ctl.
1184			 */
1185			next_reclaimed = ssn;
1186		}
1187
1188		if (tid != IWL_TID_NON_QOS) {
1189			priv->tid_data[sta_id][tid].next_reclaimed =
1190				next_reclaimed;
1191			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
 1192				   next_reclaimed);
1193		}
1194
1195		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
1196
1197		iwlagn_check_ratid_empty(priv, sta_id, tid);
1198		freed = 0;
1199
1200		/* process frames */
1201		skb_queue_walk(&skbs, skb) {
1202			hdr = (struct ieee80211_hdr *)skb->data;
1203
1204			if (!ieee80211_is_data_qos(hdr->frame_control))
1205				priv->last_seq_ctl = tx_resp->seq_ctl;
1206
1207			info = IEEE80211_SKB_CB(skb);
1208			ctx = info->driver_data[0];
1209			iwl_trans_free_tx_cmd(priv->trans,
1210					      info->driver_data[1]);
1211
1212			memset(&info->status, 0, sizeof(info->status));
1213
1214			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
1215			    ctx->vif &&
1216			    ctx->vif->type == NL80211_IFTYPE_STATION) {
1217				/* block and stop all queues */
1218				priv->passive_no_rx = true;
1219				IWL_DEBUG_TX_QUEUES(priv,
1220					"stop all queues: passive channel\n");
1221				ieee80211_stop_queues(priv->hw);
1222
1223				IWL_DEBUG_TX_REPLY(priv,
1224					   "TXQ %d status %s (0x%08x) "
1225					   "rate_n_flags 0x%x retries %d\n",
1226					   txq_id,
1227					   iwl_get_tx_fail_reason(status),
1228					   status,
1229					   le32_to_cpu(tx_resp->rate_n_flags),
1230					   tx_resp->failure_frame);
1231
1232				IWL_DEBUG_TX_REPLY(priv,
1233					   "FrameCnt = %d, idx=%d\n",
1234					   tx_resp->frame_count, cmd_index);
1235			}
1236
1237			/* check if BAR is needed */
1238			if (is_agg && !iwl_is_tx_success(status))
1239				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1240			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
1241				     tx_resp);
1242			if (!is_agg)
1243				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
1244
1245			freed++;
1246		}
1247
1248		if (tid != IWL_TID_NON_QOS) {
1249			priv->tid_data[sta_id][tid].next_reclaimed =
1250				next_reclaimed;
1251			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
1252					   next_reclaimed);
1253		}
1254
1255		if (!is_agg && freed != 1)
1256			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
1257
1258		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
1259				   iwl_get_tx_fail_reason(status), status);
1260
1261		IWL_DEBUG_TX_REPLY(priv,
1262				   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
1263				   le32_to_cpu(tx_resp->rate_n_flags),
1264				   tx_resp->failure_frame,
1265				   SEQ_TO_INDEX(sequence), ssn,
1266				   le16_to_cpu(tx_resp->seq_ctl));
1267	}
1268
1269	iwl_check_abort_status(priv, tx_resp->frame_count, status);
1270	spin_unlock_bh(&priv->sta_lock);
1271
1272	while (!skb_queue_empty(&skbs)) {
1273		skb = __skb_dequeue(&skbs);
1274		ieee80211_tx_status(priv->hw, skb);
1275	}
1276}
1277
1278/**
1279 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 1280 * @priv: driver private data
      * @rxb: Rx buffer containing the block-ack notification
      *
 1281 * Handles the block-acknowledge notification from the device, which
 1282 * reports success of frames sent via aggregation.
1283 */
1284void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1285				   struct iwl_rx_cmd_buffer *rxb)
1286{
1287	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1288	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
1289	struct iwl_ht_agg *agg;
1290	struct sk_buff_head reclaimed_skbs;
1291	struct sk_buff *skb;
1292	int sta_id;
1293	int tid;
1294	int freed;
1295
1296	/* "flow" corresponds to Tx queue */
1297	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1298
1299	/* "ssn" is start of block-ack Tx window, corresponds to index
1300	 * (in Tx queue's circular buffer) of first TFD/frame in window */
1301	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1302
1303	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
1304		IWL_ERR(priv,
1305			"BUG_ON scd_flow is bigger than number of queues\n");
1306		return;
1307	}
1308
1309	sta_id = ba_resp->sta_id;
1310	tid = ba_resp->tid;
1311	agg = &priv->tid_data[sta_id][tid].agg;
1312
1313	spin_lock_bh(&priv->sta_lock);
1314
1315	if (unlikely(!agg->wait_for_ba)) {
1316		if (unlikely(ba_resp->bitmap))
1317			IWL_ERR(priv, "Received BA when not expected\n");
1318		spin_unlock_bh(&priv->sta_lock);
1319		return;
1320	}
1321
1322	if (unlikely(scd_flow != agg->txq_id)) {
1323		/*
 1324		 * FIXME: this is a uCode bug which needs to be addressed;
 1325		 * log the information and return for now.
 1326		 * Since it can possibly happen very often, and in order
 1327		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
1328		 */
1329		IWL_DEBUG_TX_QUEUES(priv,
1330				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
1331				    scd_flow, sta_id, tid, agg->txq_id);
1332		spin_unlock_bh(&priv->sta_lock);
1333		return;
1334	}
1335
1336	__skb_queue_head_init(&reclaimed_skbs);
1337
1338	/* Release all TFDs before the SSN, i.e. all TFDs in front of
1339	 * block-ack window (we assume that they've been successfully
1340	 * transmitted ... if not, it's too late anyway). */
1341	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
1342			  &reclaimed_skbs);
1343
1344	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1345			   "sta_id = %d\n",
1346			   agg->wait_for_ba,
1347			   (u8 *) &ba_resp->sta_addr_lo32,
1348			   ba_resp->sta_id);
1349	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
1350			   "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
1351			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
1352			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1353			   scd_flow, ba_resp_scd_ssn, ba_resp->txed,
1354			   ba_resp->txed_2_done);
1355
1356	/* Mark that the expected block-ack response arrived */
1357	agg->wait_for_ba = false;
1358
1359	/* Sanity check values reported by uCode */
1360	if (ba_resp->txed_2_done > ba_resp->txed) {
1361		IWL_DEBUG_TX_REPLY(priv,
1362			"bogus sent(%d) and ack(%d) count\n",
1363			ba_resp->txed, ba_resp->txed_2_done);
1364		/*
1365		 * set txed_2_done = txed,
1366		 * so it won't impact rate scale
1367		 */
1368		ba_resp->txed = ba_resp->txed_2_done;
1369	}
1370
1371	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;
1372
1373	iwlagn_check_ratid_empty(priv, sta_id, tid);
1374	freed = 0;
1375
1376	skb_queue_walk(&reclaimed_skbs, skb) {
1377		struct ieee80211_hdr *hdr = (void *)skb->data;
1378		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1379
1380		if (ieee80211_is_data_qos(hdr->frame_control))
1381			freed++;
1382		else
1383			WARN_ON_ONCE(1);
1384
1385		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
1386
1387		memset(&info->status, 0, sizeof(info->status));
 1388		/* Packet was transmitted successfully; failures come as single
 1389		 * frames because, before failing a frame, the firmware transmits
1390		 * it without aggregation at least once.
1391		 */
1392		info->flags |= IEEE80211_TX_STAT_ACK;
1393
1394		if (freed == 1) {
1395			/* this is the first skb we deliver in this batch */
1396			/* put the rate scaling data there */
1397			info = IEEE80211_SKB_CB(skb);
1398			memset(&info->status, 0, sizeof(info->status));
1399			info->flags |= IEEE80211_TX_STAT_AMPDU;
1400			info->status.ampdu_ack_len = ba_resp->txed_2_done;
1401			info->status.ampdu_len = ba_resp->txed;
1402			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
1403						    info);
1404		}
1405	}
1406
1407	spin_unlock_bh(&priv->sta_lock);
1408
1409	while (!skb_queue_empty(&reclaimed_skbs)) {
1410		skb = __skb_dequeue(&reclaimed_skbs);
1411		ieee80211_tx_status(priv->hw, skb);
1412	}
1413}