   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/*
   3 * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
   4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
   5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
   6 */
   7#include <linux/ieee80211.h>
   8#include <linux/etherdevice.h>
   9#include <linux/tcp.h>
  10#include <net/gso.h>
  11#include <net/ip.h>
  12#include <net/ipv6.h>
  13
  14#include "iwl-trans.h"
  15#include "iwl-eeprom-parse.h"
  16#include "mvm.h"
  17#include "sta.h"
  18#include "time-sync.h"
  19
  20static void
  21iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
  22			  u16 tid, u16 ssn)
  23{
  24	struct iwl_fw_dbg_trigger_tlv *trig;
  25	struct iwl_fw_dbg_trigger_ba *ba_trig;
  26
  27	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
  28	if (!trig)
  29		return;
  30
  31	ba_trig = (void *)trig->data;
  32
  33	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
  34		return;
  35
  36	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
  37				"BAR sent to %pM, tid %d, ssn %d",
  38				addr, tid, ssn);
  39}
  40
  41#define OPT_HDR(type, skb, off) \
  42	(type *)(skb_network_header(skb) + (off))
  43
  44static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
  45			   struct ieee80211_tx_info *info,
  46			   bool amsdu)
  47{
  48	struct ieee80211_hdr *hdr = (void *)skb->data;
  49	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
  50	u16 offload_assist = 0;
  51#if IS_ENABLED(CONFIG_INET)
  52	u8 protocol = 0;
  53
  54	/* Do not compute checksum if already computed */
  55	if (skb->ip_summed != CHECKSUM_PARTIAL)
  56		goto out;
  57
  58	/* We do not expect to be requested to csum stuff we do not support */
  59	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
  60		      (skb->protocol != htons(ETH_P_IP) &&
  61		       skb->protocol != htons(ETH_P_IPV6)),
  62		      "No support for requested checksum\n")) {
  63		skb_checksum_help(skb);
  64		goto out;
  65	}
  66
  67	if (skb->protocol == htons(ETH_P_IP)) {
  68		protocol = ip_hdr(skb)->protocol;
  69	} else {
  70#if IS_ENABLED(CONFIG_IPV6)
  71		struct ipv6hdr *ipv6h =
  72			(struct ipv6hdr *)skb_network_header(skb);
  73		unsigned int off = sizeof(*ipv6h);
  74
  75		protocol = ipv6h->nexthdr;
  76		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
  77			struct ipv6_opt_hdr *hp;
  78
  79			/* only supported extension headers */
  80			if (protocol != NEXTHDR_ROUTING &&
  81			    protocol != NEXTHDR_HOP &&
  82			    protocol != NEXTHDR_DEST) {
  83				skb_checksum_help(skb);
  84				goto out;
  85			}
  86
  87			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
  88			protocol = hp->nexthdr;
  89			off += ipv6_optlen(hp);
  90		}
  91		/* if we get here - protocol now should be TCP/UDP */
  92#endif
  93	}
  94
  95	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
  96		WARN_ON_ONCE(1);
  97		skb_checksum_help(skb);
  98		goto out;
  99	}
 100
 101	/* enable L4 csum */
 102	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
 103
 104	/*
 105	 * Set offset to IP header (snap).
 106	 * We don't support tunneling so no need to take care of inner header.
 107	 * Size is in words.
 108	 */
 109	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
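    	/* i.e. the 8-byte SNAP/LLC header, expressed in 16-bit words */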
 110
 111	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
 112	if (skb->protocol == htons(ETH_P_IP) && amsdu) {
 113		ip_hdr(skb)->check = 0;
 114		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
 115	}
 116
 117	/* reset UDP/TCP header csum */
 118	if (protocol == IPPROTO_TCP)
 119		tcp_hdr(skb)->check = 0;
 120	else
 121		udp_hdr(skb)->check = 0;
 122
 123out:
 124#endif
 125	/*
 126	 * The MAC header length should include the IV, unless the IV is
 127	 * added by the firmware as in WEP; in the new Tx API the IV is
 128	 * always added by the firmware. The size is in words.
 129	 */
 130	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
 131	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
 132	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
 133		mh_len += info->control.hw_key->iv_len;
 134	mh_len /= 2;
 135	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
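    	/* e.g. an unencrypted QoS data frame has a 26-byte header -> mh_len = 13 words */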
 136
 137	if (amsdu)
 138		offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
 139	else if (ieee80211_hdrlen(hdr->frame_control) % 4)
 140		/* padding is inserted later in transport */
 141		offload_assist |= BIT(TX_CMD_OFFLD_PAD);
 142
 143	return offload_assist;
 144}
 145
 146/*
 147 * Sets most of the Tx cmd's fields
 148 */
 149void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 150			struct iwl_tx_cmd *tx_cmd,
 151			struct ieee80211_tx_info *info, u8 sta_id)
 152{
 153	struct ieee80211_hdr *hdr = (void *)skb->data;
 154	__le16 fc = hdr->frame_control;
 155	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
 156	u32 len = skb->len + FCS_LEN;
 157	bool amsdu = false;
 158	u8 ac;
 159
 160	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
 161	    (ieee80211_is_probe_resp(fc) &&
 162	     !is_multicast_ether_addr(hdr->addr1)))
 163		tx_flags |= TX_CMD_FLG_ACK;
 164	else
 165		tx_flags &= ~TX_CMD_FLG_ACK;
 166
 167	if (ieee80211_is_probe_resp(fc))
 168		tx_flags |= TX_CMD_FLG_TSF;
 169
 170	if (ieee80211_has_morefrags(fc))
 171		tx_flags |= TX_CMD_FLG_MORE_FRAG;
 172
 173	if (ieee80211_is_data_qos(fc)) {
 174		u8 *qc = ieee80211_get_qos_ctl(hdr);
 175		tx_cmd->tid_tspec = qc[0] & 0xf;
 176		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
 177		amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
 178	} else if (ieee80211_is_back_req(fc)) {
 179		struct ieee80211_bar *bar = (void *)skb->data;
 180		u16 control = le16_to_cpu(bar->control);
 181		u16 ssn = le16_to_cpu(bar->start_seq_num);
 182
 183		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 184		tx_cmd->tid_tspec = (control &
 185				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
 186			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
 187		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
 188		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
 189					  ssn);
 190	} else {
 191		if (ieee80211_is_data(fc))
 192			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
 193		else
 194			tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;
 195
 196		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
 197			tx_flags |= TX_CMD_FLG_SEQ_CTL;
 198		else
 199			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
 200	}
 201
 202	/* Default to 0 (BE) when tid_tspec is set to IWL_MAX_TID_COUNT */
 203	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
 204		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
 205	else
 206		ac = tid_to_mac80211_ac[0];
 207
 208	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
 209			TX_CMD_FLG_BT_PRIO_POS;
 210
 211	if (ieee80211_is_mgmt(fc)) {
 212		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
 213			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
 214		else if (ieee80211_is_action(fc))
 215			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 216		else
 217			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 218
 219		/* The spec allows Action frames in A-MPDU, but we don't
 220		 * support it
 221		 */
 222		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
 223	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
 224		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 225	} else {
 226		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 227	}
 228
 229	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
 230	    !is_multicast_ether_addr(hdr->addr1))
 231		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
 232
 233	if (fw_has_capa(&mvm->fw->ucode_capa,
 234			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
 235	    ieee80211_action_contains_tpc(skb))
 236		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
 237
 238	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
 239	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
 240	tx_cmd->len = cpu_to_le16((u16)skb->len);
 241	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
 242	tx_cmd->sta_id = sta_id;
 243
 244	tx_cmd->offload_assist =
 245		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, info, amsdu));
 246}
 247
 248static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
 249			      struct ieee80211_tx_info *info,
 250			      struct ieee80211_sta *sta, __le16 fc)
 251{
 252	if (info->band == NL80211_BAND_2GHZ &&
 253	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
 254		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
 255
 256	if (sta && ieee80211_is_data(fc)) {
 257		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 258
 259		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
 260	}
 261
 262	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
 263}
 264
 265static u32 iwl_mvm_convert_rate_idx(struct iwl_mvm *mvm,
 266				    struct ieee80211_tx_info *info,
 267				    int rate_idx)
 268{
 269	u32 rate_flags = 0;
 270	u8 rate_plcp;
 271	bool is_cck;
 272
 273	/* if the rate isn't a well-known legacy rate, take the lowest one */
 274	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
 275		rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm,
 276							    info,
 277							    info->control.vif);
 278
 279	/* Get PLCP rate for tx_cmd->rate_n_flags */
 280	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx);
 281	is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) &&
 282		 (rate_idx <= IWL_LAST_CCK_RATE);
 283
 284	/* Set CCK or OFDM flag */
 285	if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
 286		if (!is_cck)
 287			rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
 288		else
 289			rate_flags |= RATE_MCS_CCK_MSK;
 290	} else if (is_cck) {
 291		rate_flags |= RATE_MCS_CCK_MSK_V1;
 292	}
 293
 294	return (u32)rate_plcp | rate_flags;
 295}
 296
 297static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm,
 298				      struct ieee80211_tx_info *info,
 299				      struct ieee80211_sta *sta,
 300				      __le16 fc)
 301{
 302	struct ieee80211_tx_rate *rate = &info->control.rates[0];
 303	u32 result;
 304
 305	/*
 306	 * we only care about legacy/HT/VHT so far, so we can
 307	 * build in v1 and use iwl_new_rate_from_v1()
 308	 */
 309
 310	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
 311		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
 312		u8 nss = ieee80211_rate_get_vht_nss(rate);
 313
 314		result = RATE_MCS_VHT_MSK_V1;
 315		result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
 316		result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
 317		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
 318			result |= RATE_MCS_SGI_MSK_V1;
 319		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 320			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
 321		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
 322			result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
 323		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
 324			result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
 325
 326		if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
 327			result = iwl_new_rate_from_v1(result);
 328	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
 329		result = RATE_MCS_HT_MSK_V1;
 330		result |= u32_encode_bits(rate->idx,
 331					  RATE_HT_MCS_RATE_CODE_MSK_V1 |
 332					  RATE_HT_MCS_NSS_MSK_V1);
 333		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
 334			result |= RATE_MCS_SGI_MSK_V1;
 335		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 336			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
 337		if (info->flags & IEEE80211_TX_CTL_LDPC)
 338			result |= RATE_MCS_LDPC_MSK_V1;
 339		if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
 340			result |= RATE_MCS_STBC_MSK;
 341
 342		if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
 343			result = iwl_new_rate_from_v1(result);
 344	} else {
 345		int rate_idx = info->control.rates[0].idx;
 346
 347		result = iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
 348	}
 349
 350	if (info->control.antennas)
 351		result |= u32_encode_bits(info->control.antennas,
 352					  RATE_MCS_ANT_AB_MSK);
 353	else
 354		result |= iwl_mvm_get_tx_ant(mvm, info, sta, fc);
 355
 356	return result;
 357}
 358
 359static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
 360			       struct ieee80211_tx_info *info,
 361			       struct ieee80211_sta *sta, __le16 fc)
 362{
 363	int rate_idx = -1;
 364
 365	if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
 366		/* info->control is only relevant for non-HW rate control */
 367
 368		/* HT rate doesn't make sense for a non-data frame */
 369		WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
 370			  !ieee80211_is_data(fc),
 371			  "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
 372			  info->control.rates[0].flags,
 373			  info->control.rates[0].idx,
 374			  le16_to_cpu(fc),
 375			  sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
 376
 377		rate_idx = info->control.rates[0].idx;
 378
 379		/* For bands other than 2 GHz, remap mac80211 rate indices
 380		 * into driver indices.
 381		 */
 382		if (info->band != NL80211_BAND_2GHZ ||
 383		    (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
 384			rate_idx += IWL_FIRST_OFDM_RATE;
 385
 386		/* For the 2.4 GHz band, check that there is no need to remap */
 387		BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
 388	}
 389
 390	return iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
 391}
 392
 393static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
 394				       struct ieee80211_tx_info *info,
 395				       struct ieee80211_sta *sta, __le16 fc)
 396{
 397	if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
 398		return iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc);
 399
 400	return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
 401		iwl_mvm_get_tx_ant(mvm, info, sta, fc);
 402}
 403
 404/*
 405 * Sets the fields in the Tx cmd that are rate related
 406 */
 407void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 408			    struct ieee80211_tx_info *info,
 409			    struct ieee80211_sta *sta, __le16 fc)
 410{
 411	/* Set retry limit on RTS packets */
 412	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
 413
 414	/* Set retry limit on DATA packets and Probe Responses */
 415	if (ieee80211_is_probe_resp(fc)) {
 416		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
 417		tx_cmd->rts_retry_limit =
 418			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
 419	} else if (ieee80211_is_back_req(fc)) {
 420		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
 421	} else {
 422		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
 423	}
 424
 425	/*
 426	 * for data packets, rate info comes from the table inside the fw. This
 427	 * table is controlled by LINK_QUALITY commands
 428	 */
 429
 430	if (likely(ieee80211_is_data(fc) && sta &&
 431		   !(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))) {
 432		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 433
 434		if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
 435			tx_cmd->initial_rate_index = 0;
 436			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
 437			return;
 438		}
 439	} else if (ieee80211_is_back_req(fc)) {
 440		tx_cmd->tx_flags |=
 441			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
 442	}
 443
 444	/* Set the rate in the TX cmd */
 445	tx_cmd->rate_n_flags =
 446		cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
 447}
 448
 449static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
 450					 u8 *crypto_hdr)
 451{
 452	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 453	u64 pn;
 454
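    	/*
    	 * Lay out the 8-byte CCMP/GCMP header: PN0, PN1, reserved, key-ID
    	 * octet (Ext IV bit 0x20 | key index << 6), then PN2..PN5.
    	 */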
 455	pn = atomic64_inc_return(&keyconf->tx_pn);
 456	crypto_hdr[0] = pn;
 457	crypto_hdr[2] = 0;
 458	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
 459	crypto_hdr[1] = pn >> 8;
 460	crypto_hdr[4] = pn >> 16;
 461	crypto_hdr[5] = pn >> 24;
 462	crypto_hdr[6] = pn >> 32;
 463	crypto_hdr[7] = pn >> 40;
 464}
 465
 466/*
 467 * Sets the fields in the Tx cmd that are crypto related
 468 */
 469static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 470				      struct ieee80211_tx_info *info,
 471				      struct iwl_tx_cmd *tx_cmd,
 472				      struct sk_buff *skb_frag,
 473				      int hdrlen)
 474{
 475	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 476	u8 *crypto_hdr = skb_frag->data + hdrlen;
 477	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
 478	u64 pn;
 479
 480	switch (keyconf->cipher) {
 481	case WLAN_CIPHER_SUITE_CCMP:
 482		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
 483		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 484		break;
 485
 486	case WLAN_CIPHER_SUITE_TKIP:
 487		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
 488		pn = atomic64_inc_return(&keyconf->tx_pn);
 489		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
 490		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
 491		break;
 492
 493	case WLAN_CIPHER_SUITE_WEP104:
 494		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
 495		fallthrough;
 496	case WLAN_CIPHER_SUITE_WEP40:
 497		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
 498			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
 499			  TX_CMD_SEC_WEP_KEY_IDX_MSK);
 500
 501		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
 502		break;
 503	case WLAN_CIPHER_SUITE_GCMP:
 504	case WLAN_CIPHER_SUITE_GCMP_256:
 505		type = TX_CMD_SEC_GCMP;
 506		fallthrough;
 507	case WLAN_CIPHER_SUITE_CCMP_256:
 508		/* TODO: Taking the key from the table might introduce a race
 509		 * when PTK rekeying is done, having an old packets with a PN
 510		 * based on the old key but the message encrypted with a new
 511		 * one.
 512		 * Need to handle this.
 513		 */
 514		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
 515		tx_cmd->key[0] = keyconf->hw_key_idx;
 516		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 517		break;
 518	default:
 519		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
 520	}
 521}
 522
 523static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
 524			     const u8 *addr3_override)
 525{
 526	struct ieee80211_hdr *out_hdr = cmd;
 527
 528	memcpy(cmd, hdr, hdrlen);
 529	if (addr3_override)
 530		memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
 531}
 532
 533/*
 534 * Allocates the Tx cmd and sets the driver data pointers in the skb
 535 */
 536static struct iwl_device_tx_cmd *
 537iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 538		      struct ieee80211_tx_info *info, int hdrlen,
 539		      struct ieee80211_sta *sta, u8 sta_id,
 540		      const u8 *addr3_override)
 541{
 542	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 543	struct iwl_device_tx_cmd *dev_cmd;
 544	struct iwl_tx_cmd *tx_cmd;
 545
 546	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
 547
 548	if (unlikely(!dev_cmd))
 549		return NULL;
 550
 551	dev_cmd->hdr.cmd = TX_CMD;
 552
 553	if (iwl_mvm_has_new_tx_api(mvm)) {
 554		u32 rate_n_flags = 0;
 555		u16 flags = 0;
 556		struct iwl_mvm_sta *mvmsta = sta ?
 557			iwl_mvm_sta_from_mac80211(sta) : NULL;
 558		bool amsdu = false;
 559
 560		if (ieee80211_is_data_qos(hdr->frame_control)) {
 561			u8 *qc = ieee80211_get_qos_ctl(hdr);
 562
 563			amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
 564		}
 565
 566		if (!info->control.hw_key)
 567			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
 568
 569		/*
 570		 * For data and mgmt packets rate info comes from the fw. Only
 571		 * set rate/antenna for injected frames with fixed rate, or
 572		 * when no sta is given.
 573		 */
 574		if (unlikely(!sta ||
 575			     info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
 576			flags |= IWL_TX_FLAGS_CMD_RATE;
 577			rate_n_flags =
 578				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
 579							    hdr->frame_control);
 580		} else if (!ieee80211_is_data(hdr->frame_control) ||
 581			   mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
 582			/* These are important frames */
 583			flags |= IWL_TX_FLAGS_HIGH_PRI;
 584		}
 585
 586		if (mvm->trans->trans_cfg->device_family >=
 587		    IWL_DEVICE_FAMILY_AX210) {
 588			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
 589			u32 offload_assist = iwl_mvm_tx_csum(mvm, skb,
 590							     info, amsdu);
 591
 592			cmd->offload_assist = cpu_to_le32(offload_assist);
 593
 594			/* Total # bytes to be transmitted */
 595			cmd->len = cpu_to_le16((u16)skb->len);
 596
 597			/* Copy MAC header from skb into command buffer */
 598			iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
 599
 600			cmd->flags = cpu_to_le16(flags);
 601			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
 602		} else {
 603			struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
 604			u16 offload_assist = iwl_mvm_tx_csum(mvm, skb,
 605							     info, amsdu);
 606
 607			cmd->offload_assist = cpu_to_le16(offload_assist);
 608
 609			/* Total # bytes to be transmitted */
 610			cmd->len = cpu_to_le16((u16)skb->len);
 611
 612			/* Copy MAC header from skb into command buffer */
 613			iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
 614
 615			cmd->flags = cpu_to_le32(flags);
 616			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
 617		}
 618		goto out;
 619	}
 620
 621	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 622
 623	if (info->control.hw_key)
 624		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
 625
 626	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
 627
 628	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
 629
 630	/* Copy MAC header from skb into command buffer */
 631	iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
 632
 633out:
 634	return dev_cmd;
 635}
 636
 637static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
 638				       struct iwl_device_tx_cmd *cmd)
 639{
 640	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 641
 642	memset(&skb_info->status, 0, sizeof(skb_info->status));
 643	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
 644
 645	skb_info->driver_data[1] = cmd;
 646}
 647
 648static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
 649				      struct iwl_mvm_vif_link_info *link,
 650				      struct ieee80211_tx_info *info,
 651				      struct sk_buff *skb)
 652{
 653	struct ieee80211_hdr *hdr = (void *)skb->data;
 654	__le16 fc = hdr->frame_control;
 655
 656	switch (info->control.vif->type) {
 657	case NL80211_IFTYPE_AP:
 658	case NL80211_IFTYPE_ADHOC:
 659		/*
 660		 * Non-bufferable frames use the broadcast station, thus they
 661		 * use the probe queue.
 662		 * Also take care of the case where we send a deauth to a
 663		 * station that we don't have, or similarly an association
 664		 * response (with non-success status) for a station we can't
 665		 * accept.
 666		 * Also, disassociate frames might happen, particularly with
 667		 * reason 7 ("Class 3 frame received from nonassociated STA").
 668		 */
 669		if (ieee80211_is_mgmt(fc) &&
 670		    (!ieee80211_is_bufferable_mmpdu(skb) ||
 671		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
 672			return link->mgmt_queue;
 673
 674		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
 675		    is_multicast_ether_addr(hdr->addr1))
 676			return link->cab_queue;
 677
 678		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
 679			  "fc=0x%02x", le16_to_cpu(fc));
 680		return link->mgmt_queue;
 681	case NL80211_IFTYPE_P2P_DEVICE:
 682		if (ieee80211_is_mgmt(fc))
 683			return mvm->p2p_dev_queue;
 684
 685		WARN_ON_ONCE(1);
 686		return mvm->p2p_dev_queue;
 687	default:
 688		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
 689		return -1;
 690	}
 691}
 692
 693static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
 694				       struct sk_buff *skb)
 695{
 696	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 697	struct iwl_mvm_vif *mvmvif =
 698		iwl_mvm_vif_from_mac80211(info->control.vif);
 699	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
 700	int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
 701	struct iwl_probe_resp_data *resp_data;
 702	const u8 *ie;
 703	u8 *pos;
 704	u8 match[] = {
 705		(WLAN_OUI_WFA >> 16) & 0xff,
 706		(WLAN_OUI_WFA >> 8) & 0xff,
 707		WLAN_OUI_WFA & 0xff,
 708		WLAN_OUI_TYPE_WFA_P2P,
 709	};
 710
 711	rcu_read_lock();
 712
 713	resp_data = rcu_dereference(mvmvif->deflink.probe_resp_data);
 714	if (!resp_data)
 715		goto out;
 716
 717	if (!resp_data->notif.noa_active)
 718		goto out;
 719
 720	ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
 721				    mgmt->u.probe_resp.variable,
 722				    skb->len - base_len,
 723				    match, 4, 2);
 724	if (!ie) {
 725		IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
 726		goto out;
 727	}
 728
 729	if (skb_tailroom(skb) < resp_data->noa_len) {
 730		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
 731			IWL_ERR(mvm,
 732				"Failed to reallocate probe resp\n");
 733			goto out;
 734		}
 735	}
 736
 737	pos = skb_put(skb, resp_data->noa_len);
 738
 739	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
 740	/* Set length of IE body (not including ID and length itself) */
 741	*pos++ = resp_data->noa_len - 2;
 742	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
 743	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
 744	*pos++ = WLAN_OUI_WFA & 0xff;
 745	*pos++ = WLAN_OUI_TYPE_WFA_P2P;
 746
 747	memcpy(pos, &resp_data->notif.noa_attr,
 748	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
 749
 750out:
 751	rcu_read_unlock();
 752}
 753
 754int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 755{
 756	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 757	struct ieee80211_tx_info info;
 758	struct iwl_device_tx_cmd *dev_cmd;
 759	u8 sta_id;
 760	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 761	__le16 fc = hdr->frame_control;
 762	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
 763		IEEE80211_TX_CTL_TX_OFFCHAN;
 764	int queue = -1;
 765
 766	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
 767		return -1;
 768
 769	memcpy(&info, skb->cb, sizeof(info));
 770
 771	if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
 772		return -1;
 773
 774	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
 775		return -1;
 776
 777	if (info.control.vif) {
 778		struct iwl_mvm_vif *mvmvif =
 779			iwl_mvm_vif_from_mac80211(info.control.vif);
 780
 781		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
 782		    info.control.vif->type == NL80211_IFTYPE_AP ||
 783		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
 784			u32 link_id = u32_get_bits(info.control.flags,
 785						   IEEE80211_TX_CTRL_MLO_LINK);
 786			struct iwl_mvm_vif_link_info *link;
 787
 788			if (link_id == IEEE80211_LINK_UNSPECIFIED) {
 789				if (info.control.vif->active_links)
 790					link_id = ffs(info.control.vif->active_links) - 1;
 791				else
 792					link_id = 0;
 793			}
 794
 795			link = mvmvif->link[link_id];
 796			if (WARN_ON(!link))
 797				return -1;
 798
 799			if (!ieee80211_is_data(hdr->frame_control))
 800				sta_id = link->bcast_sta.sta_id;
 801			else
 802				sta_id = link->mcast_sta.sta_id;
 803
 804			queue = iwl_mvm_get_ctrl_vif_queue(mvm, link, &info,
 805							   skb);
 806		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
 807			queue = mvm->snif_queue;
 808			sta_id = mvm->snif_sta.sta_id;
 809		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
 810			   offchannel) {
 811			/*
 812			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
 813			 * that can be used in 2 different types of vifs, P2P &
 814			 * STATION.
 815			 * P2P uses the offchannel queue.
 816			 * STATION (HS2.0) uses the auxiliary context of the FW,
 817			 * and hence needs to be sent on the aux queue.
 818			 */
 819			sta_id = mvm->aux_sta.sta_id;
 820			queue = mvm->aux_queue;
 821		}
 822	}
 823
 824	if (queue < 0) {
 825		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
 826		return -1;
 827	}
 828
 829	if (unlikely(ieee80211_is_probe_resp(fc)))
 830		iwl_mvm_probe_resp_set_noa(mvm, skb);
 831
 832	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
 833
 834	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id,
 835					NULL);
 836	if (!dev_cmd)
 837		return -1;
 838
 839	/* From now on, we cannot access info->control */
 840	iwl_mvm_skb_prepare_status(skb, dev_cmd);
 841
 842	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
 843		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 844		return -1;
 845	}
 846
 847	return 0;
 848}
 849
 850unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
 851				    struct ieee80211_sta *sta, unsigned int tid)
 852{
 853	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 854	u8 ac = tid_to_mac80211_ac[tid];
 855	enum nl80211_band band;
 856	unsigned int txf;
 857	unsigned int val;
 858	int lmac;
 859
 860	/* For HE, redirect to the trigger-based FIFOs */
 861	if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
 862		ac += 4;
 863
 864	txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
 865
 866	/*
 867	 * Don't send an AMSDU that will be longer than the TXF.
 868	 * Add a security margin of 256 for the TX command + headers.
 869	 * We also want to have the start of the next packet inside the
 870	 * fifo to be able to send bursts.
 871	 */
 872	val = mvmsta->max_amsdu_len;
 873
 874	if (hweight16(sta->valid_links) <= 1) {
 875		if (sta->valid_links) {
 876			struct ieee80211_bss_conf *link_conf;
 877			unsigned int link = ffs(sta->valid_links) - 1;
 878
 879			rcu_read_lock();
 880			link_conf = rcu_dereference(mvmsta->vif->link_conf[link]);
 881			if (WARN_ON(!link_conf))
 882				band = NL80211_BAND_2GHZ;
 883			else
 884				band = link_conf->chandef.chan->band;
 885			rcu_read_unlock();
 886		} else {
 887			band = mvmsta->vif->bss_conf.chandef.chan->band;
 888		}
 889
 890		lmac = iwl_mvm_get_lmac_id(mvm, band);
 891	} else if (fw_has_capa(&mvm->fw->ucode_capa,
 892			       IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
 893		/* for real MLO restrict to both LMACs if they exist */
 894		lmac = IWL_LMAC_5G_INDEX;
 895		val = min_t(unsigned int, val,
 896			    mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
 897		lmac = IWL_LMAC_24G_INDEX;
 898	} else {
 899		lmac = IWL_LMAC_24G_INDEX;
 900	}
 901
 902	return min_t(unsigned int, val,
 903		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
 904}
 905
 906#ifdef CONFIG_INET
 907
 908static int
 909iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
 910		       netdev_features_t netdev_flags,
 911		       struct sk_buff_head *mpdus_skb)
 912{
 913	struct sk_buff *tmp, *next;
 914	struct ieee80211_hdr *hdr = (void *)skb->data;
 915	char cb[sizeof(skb->cb)];
 916	u16 i = 0;
 917	unsigned int tcp_payload_len;
 918	unsigned int mss = skb_shinfo(skb)->gso_size;
 919	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
 920	bool qos = ieee80211_is_data_qos(hdr->frame_control);
 921	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
 922
 923	skb_shinfo(skb)->gso_size = num_subframes * mss;
 924	memcpy(cb, skb->cb, sizeof(cb));
 925
 926	next = skb_gso_segment(skb, netdev_flags);
 927	skb_shinfo(skb)->gso_size = mss;
 928	skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
 929	if (WARN_ON_ONCE(IS_ERR(next)))
 930		return -EINVAL;
 931	else if (next)
 932		consume_skb(skb);
 933
 934	skb_list_walk_safe(next, tmp, next) {
 935		memcpy(tmp->cb, cb, sizeof(tmp->cb));
 936		/*
 937		 * Compute the length of all the data added for the A-MSDU.
 938		 * This will be used to compute the length to write in the TX
 939		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
 940		 * ETH header for n subframes.
 941		 */
 942		tcp_payload_len = skb_tail_pointer(tmp) -
 943			skb_transport_header(tmp) -
 944			tcp_hdrlen(tmp) + tmp->data_len;
 945
 946		if (ipv4)
 947			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
 948
 949		if (tcp_payload_len > mss) {
 950			skb_shinfo(tmp)->gso_size = mss;
 951			skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
 952							   SKB_GSO_TCPV6;
 953		} else {
 954			if (qos) {
 955				u8 *qc;
 956
 957				if (ipv4)
 958					ip_send_check(ip_hdr(tmp));
 959
 960				qc = ieee80211_get_qos_ctl((void *)tmp->data);
 961				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
 962			}
 963			skb_shinfo(tmp)->gso_size = 0;
 964		}
 965
 966		skb_mark_not_on_list(tmp);
 967		__skb_queue_tail(mpdus_skb, tmp);
 968		i++;
 969	}
 970
 971	return 0;
 972}
 973
 974static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 975			  struct ieee80211_tx_info *info,
 976			  struct ieee80211_sta *sta,
 977			  struct sk_buff_head *mpdus_skb)
 978{
 979	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 980	struct ieee80211_hdr *hdr = (void *)skb->data;
 981	unsigned int mss = skb_shinfo(skb)->gso_size;
 982	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
 983	u16 snap_ip_tcp, pad;
 984	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
 985	u8 tid;
 986
 987	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
 988		tcp_hdrlen(skb);
 989
 990	if (!mvmsta->max_amsdu_len ||
 991	    !ieee80211_is_data_qos(hdr->frame_control) ||
 992	    !mvmsta->amsdu_enabled)
 993		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 994
 995	/*
 996	 * Do not build an A-MSDU for IPv6 with extension headers.
 997	 * Ask the stack to segment and checksum the generated MPDUs for us.
 998	 */
 999	if (skb->protocol == htons(ETH_P_IPV6) &&
1000	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
1001	    IPPROTO_TCP) {
1002		netdev_flags &= ~NETIF_F_CSUM_MASK;
1003		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
1004	}
1005
1006	tid = ieee80211_get_tid(hdr);
1007	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
1008		return -EINVAL;
1009
1010	/*
1011	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
1012	 * during an BA session.
1013	 */
1014	if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
1015	     !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
1016	    !(mvmsta->amsdu_enabled & BIT(tid)))
1017		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
1018
1019	/*
1020	 * Take the min of the ieee80211 station and mvm station limits
1021	 */
1022	max_amsdu_len =
1023		min_t(unsigned int, sta->cur->max_amsdu_len,
1024		      iwl_mvm_max_amsdu_size(mvm, sta, tid));
1025
1026	/*
1027	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
1028	 * supported. This is a spec requirement (IEEE 802.11-2015
1029	 * section 8.7.3 NOTE 3).
1030	 */
1031	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1032	    !sta->deflink.vht_cap.vht_supported)
1033		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
1034
1035	/* Sub frame header + SNAP + IP header + TCP header + MSS */
1036	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
1037	pad = (4 - subf_len) & 0x3;
1038
1039	/*
1040	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
1041	 * N * subf_len + (N - 1) * pad.
1042	 */
1043	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
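    	/*
    	 * Example (plain IPv4/TCP, no options): snap_ip_tcp = 8 + 20 + 20 = 48,
    	 * so with an MSS of 1460, subf_len = 14 + 48 + 1460 = 1522 and pad = 2;
    	 * a max_amsdu_len of 7935 then gives (7935 + 2) / (1522 + 2) = 5 subframes.
    	 */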
1044
1045	if (sta->max_amsdu_subframes &&
1046	    num_subframes > sta->max_amsdu_subframes)
1047		num_subframes = sta->max_amsdu_subframes;
1048
1049	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1050		tcp_hdrlen(skb) + skb->data_len;
1051
1052	/*
1053	 * Make sure we have enough TBs for the A-MSDU:
1054	 *	2 for each subframe
1055	 *	1 more for each fragment
1056	 *	1 more for the potential data in the header
1057	 */
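    	/* e.g. 5 subframes and 3 frags need 5 * 2 + 3 + 1 = 14 TBs */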
1058	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
1059	    mvm->trans->max_skb_frags)
1060		num_subframes = 1;
1061
1062	if (num_subframes > 1)
1063		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1064
1065	/* This skb fits in one single A-MSDU */
1066	if (num_subframes * mss >= tcp_payload_len) {
1067		__skb_queue_tail(mpdus_skb, skb);
1068		return 0;
1069	}
1070
1071	/*
1072	 * Trick the segmentation function to make it
1073	 * create SKBs that can fit into one A-MSDU.
1074	 */
1075	return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
1076				      mpdus_skb);
1077}
1078#else /* CONFIG_INET */
1079static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
1080			  struct ieee80211_tx_info *info,
1081			  struct ieee80211_sta *sta,
1082			  struct sk_buff_head *mpdus_skb)
1083{
1084	/* Impossible to get TSO without CONFIG_INET */
1085	WARN_ON(1);
1086
1087	return -1;
1088}
1089#endif
1090
1091/* Check if there are any timed-out TIDs on a given shared TXQ */
1092static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
1093{
1094	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
1095	unsigned long now = jiffies;
1096	int tid;
1097
1098	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1099		return false;
1100
1101	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1102		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
1103				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1104			return true;
1105	}
1106
1107	return false;
1108}
1109
1110static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
1111			       struct iwl_mvm_sta *mvmsta,
1112			       int airtime)
1113{
1114	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1115	struct iwl_mvm_tcm_mac *mdata;
1116
1117	if (mac >= NUM_MAC_INDEX_DRIVER)
1118		return;
1119
1120	mdata = &mvm->tcm.data[mac];
1121
1122	if (mvm->tcm.paused)
1123		return;
1124
1125	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
1126		schedule_delayed_work(&mvm->tcm.work, 0);
1127
1128	mdata->tx.airtime += airtime;
1129}
1130
1131static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
1132				 struct iwl_mvm_sta *mvmsta, int tid)
1133{
1134	u32 ac = tid_to_mac80211_ac[tid];
1135	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1136	struct iwl_mvm_tcm_mac *mdata;
1137
1138	if (mac >= NUM_MAC_INDEX_DRIVER)
1139		return -EINVAL;
1140
1141	mdata = &mvm->tcm.data[mac];
1142
1143	mdata->tx.pkts[ac]++;
1144
1145	return 0;
1146}
1147
1148/*
1149 * Builds and sends the Tx cmd for a single MPDU.
1150 *
1151 * This function must be called with BHs disabled.
1152 */
1153static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
1154			   struct ieee80211_tx_info *info,
1155			   struct ieee80211_sta *sta,
1156			   const u8 *addr3_override)
1157{
1158	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1159	struct iwl_mvm_sta *mvmsta;
1160	struct iwl_device_tx_cmd *dev_cmd;
1161	__le16 fc;
1162	u16 seq_number = 0;
1163	u8 tid = IWL_MAX_TID_COUNT;
1164	u16 txq_id;
1165	bool is_ampdu = false;
1166	int hdrlen;
1167
1168	mvmsta = iwl_mvm_sta_from_mac80211(sta);
1169	fc = hdr->frame_control;
1170	hdrlen = ieee80211_hdrlen(fc);
1171
1172	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
1173		return -1;
1174
1175	if (WARN_ON_ONCE(!mvmsta))
1176		return -1;
1177
1178	if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
1179		return -1;
1180
1181	if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he)
1182		return -1;
1183
1184	if (unlikely(ieee80211_is_probe_resp(fc)))
1185		iwl_mvm_probe_resp_set_noa(mvm, skb);
1186
1187	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
1188					sta, mvmsta->deflink.sta_id,
1189					addr3_override);
1190	if (!dev_cmd)
1191		goto drop;
1192
1193	/*
1194	 * we handle that entirely ourselves -- for uAPSD the firmware
1195	 * will always send a notification, and for PS-Poll responses
1196	 * we'll notify mac80211 when getting frame status
1197	 */
1198	info->flags &= ~IEEE80211_TX_STATUS_EOSP;
1199
1200	spin_lock(&mvmsta->lock);
1201
1202	/* nullfunc frames should go to the MGMT queue regardless of QOS,
1203	 * the conditions of !ieee80211_is_qos_nullfunc(fc) and
1204	 * !ieee80211_is_data_qos(fc) keep the default assignment of MGMT TID
1205	 */
1206	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
1207		tid = ieee80211_get_tid(hdr);
1208		if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid))
1209			goto drop_unlock_sta;
1210
1211		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
1212		if (WARN_ONCE(is_ampdu &&
1213			      mvmsta->tid_data[tid].state != IWL_AGG_ON,
1214			      "Invalid internal agg state %d for TID %d",
1215			       mvmsta->tid_data[tid].state, tid))
1216			goto drop_unlock_sta;
1217
1218		seq_number = mvmsta->tid_data[tid].seq_number;
1219		seq_number &= IEEE80211_SCTL_SEQ;
1220
1221		if (!iwl_mvm_has_new_tx_api(mvm)) {
1222			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1223
1224			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1225			hdr->seq_ctrl |= cpu_to_le16(seq_number);
1226			/* update the tx_cmd hdr as it was already copied */
1227			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
1228		}
1229	} else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc) &&
1230		   !ieee80211_is_nullfunc(fc)) {
1231		tid = IWL_TID_NON_QOS;
1232	}
1233
1234	txq_id = mvmsta->tid_data[tid].txq_id;
1235
1236	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1237
1238	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
1239		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1240		spin_unlock(&mvmsta->lock);
1241		return -1;
1242	}
1243
1244	if (!iwl_mvm_has_new_tx_api(mvm)) {
1245		/* Keep track of the time of the last frame for this RA/TID */
1246		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
1247
1248		/*
1249		 * If we have timed-out TIDs - schedule the worker that will
1250		 * reconfig the queues and update them
1251		 *
1252		 * Note that no lock is taken here in order to not serialize
1253		 * the TX flow. This isn't dangerous because scheduling
1254		 * mvm->add_stream_wk can't ruin the state, and if we DON'T
1255		 * schedule it due to some race condition, we will on the next
1256		 * TX that gets here.
1257		 */
1258		if (unlikely(mvm->queue_info[txq_id].status ==
1259			     IWL_MVM_QUEUE_SHARED &&
1260			     iwl_mvm_txq_should_update(mvm, txq_id)))
1261			schedule_work(&mvm->add_stream_wk);
1262	}
1263
1264	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n",
1265		     mvmsta->deflink.sta_id, tid, txq_id,
1266		     IEEE80211_SEQ_TO_SN(seq_number), skb->len);
1267
1268	/* From now on, we cannot access info->control */
1269	iwl_mvm_skb_prepare_status(skb, dev_cmd);
1270
1271	/*
1272	 * The IV is introduced by the HW for the new Tx API, and it is not present
1273	 * in the skb, hence don't tell iwl_mvm_mei_tx_copy_to_csme about the
1274	 * IV for those devices.
1275	 */
1276	if (ieee80211_is_data(fc))
1277		iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
1278					    info->control.hw_key &&
1279					    !iwl_mvm_has_new_tx_api(mvm) ?
1280					    info->control.hw_key->iv_len : 0);
1281
1282	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
1283		goto drop_unlock_sta;
1284
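    	/*
    	 * IEEE80211_SCTL_SEQ covers bits 4..15 of seq_ctrl, so the +0x10
    	 * below advances the sequence number by one.
    	 */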
1285	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
1286		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
1287
1288	spin_unlock(&mvmsta->lock);
1289
1290	if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
1291				  tid == IWL_MAX_TID_COUNT ? 0 : tid))
1292		goto drop;
1293
1294	return 0;
1295
1296drop_unlock_sta:
1297	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1298	spin_unlock(&mvmsta->lock);
1299drop:
1300	IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->deflink.sta_id,
1301		     tid);
1302	return -1;
1303}
1304
1305int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
1306		       struct ieee80211_sta *sta)
1307{
1308	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1309	struct ieee80211_tx_info info;
1310	struct sk_buff_head mpdus_skbs;
1311	struct ieee80211_vif *vif;
1312	unsigned int payload_len;
1313	int ret;
1314	struct sk_buff *orig_skb = skb;
1315	const u8 *addr3;
1316
1317	if (WARN_ON_ONCE(!mvmsta))
1318		return -1;
1319
1320	if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
1321		return -1;
1322
1323	memcpy(&info, skb->cb, sizeof(info));
1324
1325	if (!skb_is_gso(skb))
1326		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
1327
1328	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1329		tcp_hdrlen(skb) + skb->data_len;
1330
1331	if (payload_len <= skb_shinfo(skb)->gso_size)
1332		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
1333
1334	__skb_queue_head_init(&mpdus_skbs);
1335
1336	vif = info.control.vif;
1337	if (!vif)
1338		return -1;
1339
1340	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
1341	if (ret)
1342		return ret;
1343
1344	WARN_ON(skb_queue_empty(&mpdus_skbs));
1345
1346	/*
1347	 * As described in IEEE Std 802.11-2020, table 9-30 (Address
1348	 * field contents), A-MSDU address 3 should contain the BSSID
1349	 * address.
1350	 * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it
1351	 * in the command header. We need to preserve the original
1352	 * address 3 in the skb header to correctly create all the
1353	 * A-MSDU subframe headers from it.
1354	 */
1355	switch (vif->type) {
1356	case NL80211_IFTYPE_STATION:
1357		addr3 = vif->cfg.ap_addr;
1358		break;
1359	case NL80211_IFTYPE_AP:
1360		addr3 = vif->addr;
1361		break;
1362	default:
1363		addr3 = NULL;
1364		break;
1365	}
1366
1367	while (!skb_queue_empty(&mpdus_skbs)) {
1368		struct ieee80211_hdr *hdr;
1369		bool amsdu;
1370
1371		skb = __skb_dequeue(&mpdus_skbs);
1372		hdr = (void *)skb->data;
1373		amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
1374			(*ieee80211_get_qos_ctl(hdr) &
1375			 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
1376
1377		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta,
1378				      amsdu ? addr3 : NULL);
1379		if (ret) {
1380			/* Free skbs created as part of TSO logic that have not yet been dequeued */
1381			__skb_queue_purge(&mpdus_skbs);
1382			/* skb here is not necessarily the same as the skb that entered
1383			 * this method, so free it explicitly.
1384			 */
1385			if (skb == orig_skb)
1386				ieee80211_free_txskb(mvm->hw, skb);
1387			else
1388				kfree_skb(skb);
1389			/* there was an error, but we consumed the skb one way or another, so return 0 */
1390			return 0;
1391		}
1392	}
1393
1394	return 0;
1395}
1396
1397static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
1398				      struct ieee80211_sta *sta, u8 tid)
1399{
1400	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1401	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1402	struct ieee80211_vif *vif = mvmsta->vif;
1403	u16 normalized_ssn;
1404
1405	lockdep_assert_held(&mvmsta->lock);
1406
1407	if ((tid_data->state == IWL_AGG_ON ||
1408	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
1409	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
1410		/*
1411		 * Now that this aggregation or DQA queue is empty, tell
1412		 * mac80211 so it knows we no longer have frames buffered for
1413		 * the station on this TID (for the TIM bitmap calculation).
1414		 */
1415		ieee80211_sta_set_buffered(sta, tid, false);
1416	}
1417
1418	/*
1419	 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we
1420	 * need to align the wrap-around of the ssn to compare relevant values.
1421	 */
1422	normalized_ssn = tid_data->ssn;
1423	if (mvm->trans->trans_cfg->gen2)
1424		normalized_ssn &= 0xff;
1425
1426	if (normalized_ssn != tid_data->next_reclaimed)
1427		return;
1428
1429	switch (tid_data->state) {
1430	case IWL_EMPTYING_HW_QUEUE_ADDBA:
1431		IWL_DEBUG_TX_QUEUES(mvm,
1432				    "Can continue addBA flow ssn = next_recl = %d\n",
1433				    tid_data->next_reclaimed);
1434		tid_data->state = IWL_AGG_STARTING;
1435		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1436		break;
1437
1438	case IWL_EMPTYING_HW_QUEUE_DELBA:
1439		IWL_DEBUG_TX_QUEUES(mvm,
1440				    "Can continue DELBA flow ssn = next_recl = %d\n",
1441				    tid_data->next_reclaimed);
1442		tid_data->state = IWL_AGG_OFF;
1443		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1444		break;
1445
1446	default:
1447		break;
1448	}
1449}
1450
1451#ifdef CONFIG_IWLWIFI_DEBUG
1452const char *iwl_mvm_get_tx_fail_reason(u32 status)
1453{
1454#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1455#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1456
1457	switch (status & TX_STATUS_MSK) {
1458	case TX_STATUS_SUCCESS:
1459		return "SUCCESS";
1460	TX_STATUS_POSTPONE(DELAY);
1461	TX_STATUS_POSTPONE(FEW_BYTES);
1462	TX_STATUS_POSTPONE(BT_PRIO);
1463	TX_STATUS_POSTPONE(QUIET_PERIOD);
1464	TX_STATUS_POSTPONE(CALC_TTAK);
1465	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1466	TX_STATUS_FAIL(SHORT_LIMIT);
1467	TX_STATUS_FAIL(LONG_LIMIT);
1468	TX_STATUS_FAIL(UNDERRUN);
1469	TX_STATUS_FAIL(DRAIN_FLOW);
1470	TX_STATUS_FAIL(RFKILL_FLUSH);
1471	TX_STATUS_FAIL(LIFE_EXPIRE);
1472	TX_STATUS_FAIL(DEST_PS);
1473	TX_STATUS_FAIL(HOST_ABORTED);
1474	TX_STATUS_FAIL(BT_RETRY);
1475	TX_STATUS_FAIL(STA_INVALID);
1476	TX_STATUS_FAIL(FRAG_DROPPED);
1477	TX_STATUS_FAIL(TID_DISABLE);
1478	TX_STATUS_FAIL(FIFO_FLUSHED);
1479	TX_STATUS_FAIL(SMALL_CF_POLL);
1480	TX_STATUS_FAIL(FW_DROP);
1481	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
1482	}
1483
1484	return "UNKNOWN";
1485
1486#undef TX_STATUS_FAIL
1487#undef TX_STATUS_POSTPONE
1488}
1489#endif /* CONFIG_IWLWIFI_DEBUG */
1490
1491static int iwl_mvm_get_hwrate_chan_width(u32 chan_width)
1492{
1493	switch (chan_width) {
1494	case RATE_MCS_CHAN_WIDTH_20:
1495		return 0;
1496	case RATE_MCS_CHAN_WIDTH_40:
1497		return IEEE80211_TX_RC_40_MHZ_WIDTH;
1498	case RATE_MCS_CHAN_WIDTH_80:
1499		return IEEE80211_TX_RC_80_MHZ_WIDTH;
1500	case RATE_MCS_CHAN_WIDTH_160:
1501		return IEEE80211_TX_RC_160_MHZ_WIDTH;
1502	default:
1503		return 0;
1504	}
1505}
1506
1507void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
1508			       enum nl80211_band band,
1509			       struct ieee80211_tx_rate *r)
1510{
1511	u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
1512	u32 rate = format == RATE_MCS_HT_MSK ?
1513		RATE_HT_MCS_INDEX(rate_n_flags) :
1514		rate_n_flags & RATE_MCS_CODE_MSK;
1515
1516	r->flags |=
1517		iwl_mvm_get_hwrate_chan_width(rate_n_flags &
1518					      RATE_MCS_CHAN_WIDTH_MSK);
1519
1520	if (rate_n_flags & RATE_MCS_SGI_MSK)
1521		r->flags |= IEEE80211_TX_RC_SHORT_GI;
1522	if (format ==  RATE_MCS_HT_MSK) {
1523		r->flags |= IEEE80211_TX_RC_MCS;
1524		r->idx = rate;
1525	} else if (format ==  RATE_MCS_VHT_MSK) {
1526		ieee80211_rate_set_vht(r, rate,
1527				       FIELD_GET(RATE_MCS_NSS_MSK,
1528						 rate_n_flags) + 1);
1529		r->flags |= IEEE80211_TX_RC_VHT_MCS;
1530	} else if (format == RATE_MCS_HE_MSK) {
1531		/* mac80211 cannot do this without ieee80211_tx_status_ext()
1532		 * but it only matters for radiotap */
1533		r->idx = 0;
1534	} else {
1535		r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
1536							       band);
1537	}
1538}
1539
1540void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags,
1541				  enum nl80211_band band,
1542				  struct ieee80211_tx_rate *r)
1543{
1544	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
1545		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1546
1547	r->flags |=
1548		iwl_mvm_get_hwrate_chan_width(rate_n_flags &
1549					      RATE_MCS_CHAN_WIDTH_MSK_V1);
1550
1551	if (rate_n_flags & RATE_MCS_SGI_MSK_V1)
1552		r->flags |= IEEE80211_TX_RC_SHORT_GI;
1553	if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
1554		r->flags |= IEEE80211_TX_RC_MCS;
1555		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
1556	} else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) {
1557		ieee80211_rate_set_vht(
1558			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
1559			FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1);
1560		r->flags |= IEEE80211_TX_RC_VHT_MCS;
1561	} else {
1562		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
1563							     band);
1564	}
1565}
1566
1567/*
1568 * translate ucode response to mac80211 tx status control values
1569 */
1570static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
1571					u32 rate_n_flags,
1572					struct ieee80211_tx_info *info)
1573{
1574	struct ieee80211_tx_rate *r = &info->status.rates[0];
1575
1576	if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
1577				    TX_CMD, 0) <= 6)
1578		rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
1579
1580	info->status.antenna =
1581		((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
1582	iwl_mvm_hwrate_to_tx_rate(rate_n_flags,
1583				  info->band, r);
1584}
1585
1586static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
1587					    u32 status, __le16 frame_control)
1588{
1589	struct iwl_fw_dbg_trigger_tlv *trig;
1590	struct iwl_fw_dbg_trigger_tx_status *status_trig;
1591	int i;
1592
1593	if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) {
1594		enum iwl_fw_ini_time_point tp =
1595			IWL_FW_INI_TIME_POINT_TX_FAILED;
1596
1597		if (ieee80211_is_action(frame_control))
1598			tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
1599
1600		iwl_dbg_tlv_time_point(&mvm->fwrt,
1601				       tp, NULL);
1602		return;
1603	}
1604
1605	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
1606				     FW_DBG_TRIGGER_TX_STATUS);
1607	if (!trig)
1608		return;
1609
1610	status_trig = (void *)trig->data;
1611
1612	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
1613		/* don't collect on status 0 */
1614		if (!status_trig->statuses[i].status)
1615			break;
1616
1617		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
1618			continue;
1619
1620		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1621					"Tx status %d was received",
1622					status & TX_STATUS_MSK);
1623		break;
1624	}
1625}
1626
1627/*
1628 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
1629 * @tx_resp: the Tx response from the fw (agg or non-agg)
1630 *
1631 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
1632 * it can't know that everything will go well until the end of the AMPDU, it
1633 * can't know in advance the number of MPDUs that will be sent in the current
1634 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
1635 * Hence, it can't know in advance what the SSN of the SCD will be at the end
1636 * of the batch. This is why the SSN of the SCD is written at the end of the
1637 * whole struct at a variable offset. This function knows how to cope with the
1638 * variable offset and returns the SSN of the SCD.
1639 */
1640static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
1641				      struct iwl_mvm_tx_resp *tx_resp)
1642{
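    	/* the SSN is a 12-bit WiFi sequence number, hence the 0xfff mask */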
1643	return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
1644			    tx_resp->frame_count) & 0xfff;
1645}
1646
1647static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1648				     struct iwl_rx_packet *pkt)
1649{
1650	struct ieee80211_sta *sta;
1651	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1652	int txq_id = SEQ_TO_QUEUE(sequence);
1653	/* struct iwl_mvm_tx_resp_v3 is almost the same */
1654	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1655	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1656	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1657	struct agg_tx_status *agg_status =
1658		iwl_mvm_get_agg_status(mvm, tx_resp);
1659	u32 status = le16_to_cpu(agg_status->status);
1660	u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
1661	struct sk_buff_head skbs;
1662	u8 skb_freed = 0;
1663	u8 lq_color;
1664	u16 next_reclaimed, seq_ctl;
1665	bool is_ndp = false;
1666
1667	__skb_queue_head_init(&skbs);
1668
1669	if (iwl_mvm_has_new_tx_api(mvm))
1670		txq_id = le16_to_cpu(tx_resp->tx_queue);
1671
1672	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
1673
1674	/* we can free until ssn % q.n_bd not inclusive */
1675	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
1676
1677	while (!skb_queue_empty(&skbs)) {
1678		struct sk_buff *skb = __skb_dequeue(&skbs);
1679		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1680		struct ieee80211_hdr *hdr = (void *)skb->data;
1681		bool flushed = false;
1682
1683		skb_freed++;
1684
1685		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1686
1687		memset(&info->status, 0, sizeof(info->status));
1688		info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
1689
1690		/* inform mac80211 about what happened with the frame */
1691		switch (status & TX_STATUS_MSK) {
1692		case TX_STATUS_SUCCESS:
1693		case TX_STATUS_DIRECT_DONE:
1694			info->flags |= IEEE80211_TX_STAT_ACK;
1695			break;
1696		case TX_STATUS_FAIL_FIFO_FLUSHED:
1697		case TX_STATUS_FAIL_DRAIN_FLOW:
1698			flushed = true;
1699			break;
1700		case TX_STATUS_FAIL_DEST_PS:
1701			/* the FW should have stopped the queue and not
1702			 * returned this status
1703			 */
1704			IWL_ERR_LIMIT(mvm,
1705				      "FW reported TX filtered, status=0x%x, FC=0x%x\n",
1706				      status, le16_to_cpu(hdr->frame_control));
1707			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1708			break;
1709		default:
1710			break;
1711		}
1712
1713		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
1714		    ieee80211_is_mgmt(hdr->frame_control))
1715			iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
1716
1717		/*
1718		 * If we are freeing multiple frames, mark all the frames
1719		 * but the first one as acked, since they were acknowledged
1720		 * before.
1721		 */
1722		if (skb_freed > 1)
1723			info->flags |= IEEE80211_TX_STAT_ACK;
1724
1725		iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control);
1726
1727		info->status.rates[0].count = tx_resp->failure_frame + 1;
1728
1729		iwl_mvm_hwrate_to_tx_status(mvm->fw,
1730					    le32_to_cpu(tx_resp->initial_rate),
1731					    info);
1732
1733		/* Don't assign the converted initial_rate, because driver
1734		 * TLC uses this and doesn't support the new FW rate
1735		 */
1736		info->status.status_driver_data[1] =
1737			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
1738
1739		/* Single frame failure in an AMPDU queue => send BAR */
1740		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1741		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
1742		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
1743			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1744		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1745
1746		/* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
1747		if (ieee80211_is_back_req(hdr->frame_control))
1748			seq_ctl = 0;
1749		else if (status != TX_STATUS_SUCCESS)
1750			seq_ctl = le16_to_cpu(hdr->seq_ctrl);
1751
1752		if (unlikely(!seq_ctl)) {
1753			/*
1754			 * If it is an NDP, we can't update next_reclaimed since
1755			 * its sequence control is 0. Note that for that same
1756			 * reason, NDPs are never sent to A-MPDU'able queues,
1757			 * so we can never have more than one freed frame
1758			 * for a single Tx response (see WARN_ON below).
1759			 */
1760			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1761				is_ndp = true;
1762		}
1763
1764		/*
1765		 * TODO: this is not accurate if we are freeing more than one
1766		 * packet.
1767		 */
1768		info->status.tx_time =
1769			le16_to_cpu(tx_resp->wireless_media_time);
1770		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
1771		lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1772		info->status.status_driver_data[0] =
1773			RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
1774
1775		if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1)))
1776			ieee80211_tx_status_skb(mvm->hw, skb);
1777	}
1778
1779	/* This is an aggregation queue or might become one, so we use
1780	 * the ssn since: ssn = wifi seq_num % 256.
1781	 * The seq_ctl is the sequence control of the packet to which
1782	 * this Tx response relates. But if there is a hole in the
1783	 * bitmap of the BA we received, this Tx response may allow us to
1784	 * reclaim the hole and all the subsequent packets that were
1785	 * already acked. In that case, seq_ctl != ssn, and the next
1786	 * packet to be reclaimed will be ssn and not seq_ctl. In that
1787	 * case, several packets will be reclaimed even if
1788	 * frame_count = 1.
1789	 *
1790	 * The ssn is the index (% 256) of the latest packet that has
1791	 * been treated (acked / dropped) + 1.
1792	 */
1793	next_reclaimed = ssn;
1794
1795	IWL_DEBUG_TX_REPLY(mvm,
1796			   "TXQ %d status %s (0x%08x)\n",
1797			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);
1798
1799	IWL_DEBUG_TX_REPLY(mvm,
1800			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
1801			   le32_to_cpu(tx_resp->initial_rate),
1802			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
1803			   ssn, next_reclaimed, seq_ctl);
1804
1805	rcu_read_lock();
1806
1807	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1808	/*
1809	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
1810	 * the firmware while we still have packets for it in the Tx queues.
1811	 */
1812	if (WARN_ON_ONCE(!sta))
1813		goto out;
1814
1815	if (!IS_ERR(sta)) {
1816		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1817
1818		iwl_mvm_tx_airtime(mvm, mvmsta,
1819				   le16_to_cpu(tx_resp->wireless_media_time));
1820
1821		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
1822		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
1823			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
1824
1825		if (sta->wme && tid != IWL_MGMT_TID) {
1826			struct iwl_mvm_tid_data *tid_data =
1827				&mvmsta->tid_data[tid];
1828			bool send_eosp_ndp = false;
1829
1830			spin_lock_bh(&mvmsta->lock);
1831
1832			if (!is_ndp) {
1833				tid_data->next_reclaimed = next_reclaimed;
1834				IWL_DEBUG_TX_REPLY(mvm,
1835						   "Next reclaimed packet:%d\n",
1836						   next_reclaimed);
1837			} else {
1838				IWL_DEBUG_TX_REPLY(mvm,
1839						   "NDP - don't update next_reclaimed\n");
1840			}
1841
1842			iwl_mvm_check_ratid_empty(mvm, sta, tid);
1843
1844			if (mvmsta->sleep_tx_count) {
1845				mvmsta->sleep_tx_count--;
1846				if (mvmsta->sleep_tx_count &&
1847				    !iwl_mvm_tid_queued(mvm, tid_data)) {
1848					/*
1849					 * The number of frames in the queue
1850					 * dropped to 0 even if we sent less
1851					 * frames than we thought we had on the
1852					 * Tx queue.
1853					 * This means we had holes in the BA
1854					 * window that we just filled, ask
1855					 * mac80211 to send EOSP since the
1856					 * firmware won't know how to do that.
1857					 * Send NDP and the firmware will send
1858					 * EOSP notification that will trigger
1859					 * a call to ieee80211_sta_eosp().
1860					 */
1861					send_eosp_ndp = true;
1862				}
1863			}
1864
1865			spin_unlock_bh(&mvmsta->lock);
1866			if (send_eosp_ndp) {
1867				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
1868					IEEE80211_FRAME_RELEASE_UAPSD,
1869					1, tid, false, false);
1870				mvmsta->sleep_tx_count = 0;
1871				ieee80211_send_eosp_nullfunc(sta, tid);
1872			}
1873		}
1874
1875		if (mvmsta->next_status_eosp) {
1876			mvmsta->next_status_eosp = false;
1877			ieee80211_sta_eosp(sta);
1878		}
1879	}
1880out:
1881	rcu_read_unlock();
1882}
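/*
 * Editor's note -- illustrative sketch, not part of this file.  As the
 * comment above explains, next_reclaimed and the SCD SSN both live in the
 * "wifi seq_num % 256" space, so the number of frames a single Tx
 * response can reclaim is the modular distance between the old read
 * pointer and the new SSN:
 */
#include <stdint.h>

static inline uint8_t example_reclaim_span(uint8_t old_next_reclaimed,
					   uint8_t new_ssn)
{
	/* unsigned 8-bit subtraction wraps exactly like "% 256" */
	return (uint8_t)(new_ssn - old_next_reclaimed);
}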
1883
1884#ifdef CONFIG_IWLWIFI_DEBUG
1885#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
1886static const char *iwl_get_agg_tx_status(u16 status)
1887{
1888	switch (status & AGG_TX_STATE_STATUS_MSK) {
1889	AGG_TX_STATE_(TRANSMITTED);
1890	AGG_TX_STATE_(UNDERRUN);
1891	AGG_TX_STATE_(BT_PRIO);
1892	AGG_TX_STATE_(FEW_BYTES);
1893	AGG_TX_STATE_(ABORT);
1894	AGG_TX_STATE_(TX_ON_AIR_DROP);
1895	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
1896	AGG_TX_STATE_(LAST_SENT_BT_KILL);
1897	AGG_TX_STATE_(SCD_QUERY);
1898	AGG_TX_STATE_(TEST_BAD_CRC32);
1899	AGG_TX_STATE_(RESPONSE);
1900	AGG_TX_STATE_(DUMP_TX);
1901	AGG_TX_STATE_(DELAY_TX);
1902	}
1903
1904	return "UNKNOWN";
1905}
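/*
 * Editor's note -- illustrative sketch, not part of this file.  The
 * AGG_TX_STATE_() macro above is the common "case X: return #X"
 * stringify pattern.  The same idea applied to a hypothetical enum:
 */
enum example_state { EX_IDLE, EX_BUSY, EX_DONE };

#define EXAMPLE_STATE_(x) case EX_##x: return #x
static const char *example_state_name(enum example_state s)
{
	switch (s) {
	EXAMPLE_STATE_(IDLE);
	EXAMPLE_STATE_(BUSY);
	EXAMPLE_STATE_(DONE);
	}
	return "UNKNOWN";
}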
1906
1907static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1908				      struct iwl_rx_packet *pkt)
1909{
1910	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1911	struct agg_tx_status *frame_status =
1912		iwl_mvm_get_agg_status(mvm, tx_resp);
1913	int i;
1914	bool trigger_timepoint = false;
1915
1916	for (i = 0; i < tx_resp->frame_count; i++) {
1917		u16 fstatus = le16_to_cpu(frame_status[i].status);
1918		/* In case one frame wasn't transmitted, trigger a time point */
1919		trigger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) !=
1920				      AGG_TX_STATE_TRANSMITTED);
1921		IWL_DEBUG_TX_REPLY(mvm,
1922				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
1923				   iwl_get_agg_tx_status(fstatus),
1924				   fstatus & AGG_TX_STATE_STATUS_MSK,
1925				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
1926					AGG_TX_STATE_TRY_CNT_POS,
1927				   le16_to_cpu(frame_status[i].sequence));
1928	}
1929
1930	if (trigger_timepoint)
1931		iwl_dbg_tlv_time_point(&mvm->fwrt,
1932				       IWL_FW_INI_TIME_POINT_TX_FAILED, NULL);
1933
1934}
1935#else
1936static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1937				      struct iwl_rx_packet *pkt)
1938{}
1939#endif /* CONFIG_IWLWIFI_DEBUG */
1940
1941static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1942				  struct iwl_rx_packet *pkt)
1943{
1944	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1945	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1946	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1947	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1948	struct iwl_mvm_sta *mvmsta;
1949	int queue = SEQ_TO_QUEUE(sequence);
1950	struct ieee80211_sta *sta;
1951
1952	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
1953			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
1954		return;
1955
1956	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
1957
1958	rcu_read_lock();
1959
1960	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1961
1962	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1963	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) {
1964		rcu_read_unlock();
1965		return;
1966	}
1967
1968	if (!WARN_ON_ONCE(!mvmsta)) {
1969		mvmsta->tid_data[tid].rate_n_flags =
1970			le32_to_cpu(tx_resp->initial_rate);
1971		mvmsta->tid_data[tid].tx_time =
1972			le16_to_cpu(tx_resp->wireless_media_time);
1973		mvmsta->tid_data[tid].lq_color =
1974			TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1975		iwl_mvm_tx_airtime(mvm, mvmsta,
1976				   le16_to_cpu(tx_resp->wireless_media_time));
1977	}
1978
1979	rcu_read_unlock();
1980}
1981
1982void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1983{
1984	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1985	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1986
1987	if (tx_resp->frame_count == 1)
1988		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
1989	else
1990		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
1991}
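/*
 * Editor's note -- illustrative sketch, not part of this file.  The
 * SEQ_TO_QUEUE()/SEQ_TO_INDEX() helpers used above unpack the queue
 * number and TFD index that the firmware echoes back in
 * pkt->hdr.sequence.  A standalone model; the exact field widths below
 * are an assumption made for illustration only:
 */
#include <stdint.h>

#define EXAMPLE_SEQ_TO_INDEX(seq)	((seq) & 0xff)
#define EXAMPLE_SEQ_TO_QUEUE(seq)	(((seq) >> 8) & 0x1f)

static inline void example_unpack_sequence(uint16_t seq, int *queue, int *index)
{
	*index = EXAMPLE_SEQ_TO_INDEX(seq);	/* ring slot of the TFD */
	*queue = EXAMPLE_SEQ_TO_QUEUE(seq);	/* Tx queue the frame used */
}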
1992
1993static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
1994			       int txq, int index,
1995			       struct ieee80211_tx_info *tx_info, u32 rate,
1996			       bool is_flush)
1997{
1998	struct sk_buff_head reclaimed_skbs;
1999	struct iwl_mvm_tid_data *tid_data = NULL;
2000	struct ieee80211_sta *sta;
2001	struct iwl_mvm_sta *mvmsta = NULL;
2002	struct sk_buff *skb;
2003	int freed;
2004
2005	if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations ||
2006		      tid > IWL_MAX_TID_COUNT,
2007		      "sta_id %d tid %d", sta_id, tid))
2008		return;
2009
2010	rcu_read_lock();
2011
2012	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
2013
2014	/* Reclaiming frames for a station that has been deleted? */
2015	if (WARN_ON_ONCE(!sta)) {
2016		rcu_read_unlock();
2017		return;
2018	}
2019
2020	__skb_queue_head_init(&reclaimed_skbs);
2021
2022	/*
2023	 * Release all TFDs before the SSN, i.e. all TFDs in front of
2024	 * block-ack window (we assume that they've been successfully
2025	 * transmitted ... if not, it's too late anyway).
2026	 */
2027	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
2028
2029	skb_queue_walk(&reclaimed_skbs, skb) {
2030		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2031
2032		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
2033
2034		memset(&info->status, 0, sizeof(info->status));
2035		/* Packet was transmitted successfully; failures come as single
2036		 * frames because, before failing a frame, the firmware
2037		 * transmits it at least once without aggregation.
2038		 */
2039		if (!is_flush)
2040			info->flags |= IEEE80211_TX_STAT_ACK;
2041		else
2042			info->flags &= ~IEEE80211_TX_STAT_ACK;
2043	}
2044
2045	/*
2046	 * It's possible to get a BA response after invalidating the rcu (rcu is
2047	 * invalidated in order to prevent new Tx from being sent, but there may
2048	 * be some frames already in-flight).
2049	 * In this case we just want to reclaim, and could skip all the
2050	 * sta-dependent stuff since it's in the middle of being removed
2051	 * anyways.
2052	 */
2053	if (IS_ERR(sta))
2054		goto out;
2055
2056	mvmsta = iwl_mvm_sta_from_mac80211(sta);
2057	tid_data = &mvmsta->tid_data[tid];
2058
2059	if (tid_data->txq_id != txq) {
2060		IWL_ERR(mvm,
2061			"invalid reclaim request: Q %d, tid %d\n",
2062			tid_data->txq_id, tid);
2063		rcu_read_unlock();
2064		return;
2065	}
2066
2067	spin_lock_bh(&mvmsta->lock);
2068
2069	tid_data->next_reclaimed = index;
2070
2071	iwl_mvm_check_ratid_empty(mvm, sta, tid);
2072
2073	freed = 0;
2074
2075	/* pack lq color from tid_data along the reduced txp */
2076	tx_info->status.status_driver_data[0] =
2077		RS_DRV_DATA_PACK(tid_data->lq_color,
2078				 tx_info->status.status_driver_data[0]);
2079	tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
2080
2081	skb_queue_walk(&reclaimed_skbs, skb) {
2082		struct ieee80211_hdr *hdr = (void *)skb->data;
2083		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2084
2085		if (!is_flush) {
2086			if (ieee80211_is_data_qos(hdr->frame_control))
2087				freed++;
2088			else
2089				WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
2090		}
2091
2092		/* this is the first skb we deliver in this batch */
2093		/* put the rate scaling data there */
2094		if (freed == 1) {
2095			info->flags |= IEEE80211_TX_STAT_AMPDU;
2096			memcpy(&info->status, &tx_info->status,
2097			       sizeof(tx_info->status));
2098			iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info);
2099		}
2100	}
2101
2102	spin_unlock_bh(&mvmsta->lock);
2103
2104	/* We got a BA notif with 0 acked or the scd_ssn didn't progress,
2105	 * which is possible (i.e. the first MPDU in the aggregation wasn't
2106	 * acked). Still, it's important to update RS about sent vs. acked.
2107	 */
2108	if (!is_flush && skb_queue_empty(&reclaimed_skbs) &&
2109	    !iwl_mvm_has_tlc_offload(mvm)) {
2110		struct ieee80211_chanctx_conf *chanctx_conf = NULL;
2111
2112		/* no TLC offload, so non-MLD mode */
2113		if (mvmsta->vif)
2114			chanctx_conf =
2115				rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf);
2116
2117		if (WARN_ON_ONCE(!chanctx_conf))
2118			goto out;
2119
2120		tx_info->band = chanctx_conf->def.chan->band;
2121		iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info);
2122
2123		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
2124		iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false);
2125	}
2126
2127out:
2128	rcu_read_unlock();
2129
2130	while (!skb_queue_empty(&reclaimed_skbs)) {
2131		skb = __skb_dequeue(&reclaimed_skbs);
2132		ieee80211_tx_status_skb(mvm->hw, skb);
2133	}
2134}
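/*
 * Editor's note -- illustrative sketch, not part of this file.
 * iwl_trans_reclaim(), used above, frees every TFD from the queue's
 * current read pointer up to -- but not including -- the given index,
 * modulo the ring size ("we can free until ssn % q.n_bd not inclusive").
 * A minimal model of that ring walk with a hypothetical queue struct:
 */
struct example_txq {
	unsigned int read_ptr;	/* oldest not-yet-reclaimed entry */
	unsigned int n_bd;	/* number of buffer descriptors in the ring */
};

static inline unsigned int example_reclaim(struct example_txq *q,
					   unsigned int index)
{
	unsigned int freed = 0;

	while (q->read_ptr != index % q->n_bd) {
		/* the real code hands the freed skb back to mac80211 here */
		q->read_ptr = (q->read_ptr + 1) % q->n_bd;
		freed++;
	}
	return freed;
}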
2135
2136void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2137{
2138	struct iwl_rx_packet *pkt = rxb_addr(rxb);
2139	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
2140	int sta_id, tid, txq, index;
2141	struct ieee80211_tx_info ba_info = {};
2142	struct iwl_mvm_ba_notif *ba_notif;
2143	struct iwl_mvm_tid_data *tid_data;
2144	struct iwl_mvm_sta *mvmsta;
2145
2146	ba_info.flags = IEEE80211_TX_STAT_AMPDU;
2147
2148	if (iwl_mvm_has_new_tx_api(mvm)) {
2149		struct iwl_mvm_compressed_ba_notif *ba_res =
2150			(void *)pkt->data;
2151		u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
2152		u16 tfd_cnt;
2153		int i;
2154
2155		if (IWL_FW_CHECK(mvm, sizeof(*ba_res) > pkt_len,
2156				 "short BA notification (%d)\n", pkt_len))
2157			return;
2158
2159		sta_id = ba_res->sta_id;
2160		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
2161		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
2162		ba_info.status.tx_time =
2163			(u16)le32_to_cpu(ba_res->wireless_time);
2164		ba_info.status.status_driver_data[0] =
2165			(void *)(uintptr_t)ba_res->reduced_txp;
2166
2167		tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
2168		if (!tfd_cnt)
2169			return;
2170
2171		if (IWL_FW_CHECK(mvm,
2172				 struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
2173				 "short BA notification (tfds:%d, size:%d)\n",
2174				 tfd_cnt, pkt_len))
2175			return;
2176
2177		rcu_read_lock();
2178
2179		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
2180		/*
2181		 * It's possible to get a BA response after invalidating the rcu
2182		 * (rcu is invalidated in order to prevent new Tx from being
2183		 * sent, but there may be some frames already in-flight).
2184		 * In this case we just want to reclaim, and could skip all the
2185		 * sta-dependent stuff since it's in the middle of being removed
2186		 * anyways.
2187		 */
2188
2189		/* Free per TID */
2190		for (i = 0; i < tfd_cnt; i++) {
2191			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
2192				&ba_res->tfd[i];
2193
2194			tid = ba_tfd->tid;
2195			if (tid == IWL_MGMT_TID)
2196				tid = IWL_MAX_TID_COUNT;
2197
2198			if (mvmsta)
2199				mvmsta->tid_data[i].lq_color = lq_color;
2200
2201			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
2202					   (int)(le16_to_cpu(ba_tfd->q_num)),
2203					   le16_to_cpu(ba_tfd->tfd_index),
2204					   &ba_info,
2205					   le32_to_cpu(ba_res->tx_rate), false);
2206		}
2207
2208		if (mvmsta)
2209			iwl_mvm_tx_airtime(mvm, mvmsta,
2210					   le32_to_cpu(ba_res->wireless_time));
2211		rcu_read_unlock();
2212
2213		IWL_DEBUG_TX_REPLY(mvm,
2214				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
2215				   sta_id, le32_to_cpu(ba_res->flags),
2216				   le16_to_cpu(ba_res->txed),
2217				   le16_to_cpu(ba_res->done));
2218		return;
2219	}
2220
2221	ba_notif = (void *)pkt->data;
2222	sta_id = ba_notif->sta_id;
2223	tid = ba_notif->tid;
2224	/* "flow" corresponds to Tx queue */
2225	txq = le16_to_cpu(ba_notif->scd_flow);
2226	/* "ssn" is start of block-ack Tx window, corresponds to index
2227	 * (in Tx queue's circular buffer) of first TFD/frame in window */
2228	index = le16_to_cpu(ba_notif->scd_ssn);
2229
2230	rcu_read_lock();
2231	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
2232	if (IWL_FW_CHECK(mvm, !mvmsta,
2233			 "invalid STA ID %d in BA notif\n",
2234			 sta_id)) {
2235		rcu_read_unlock();
2236		return;
2237	}
2238
2239	tid_data = &mvmsta->tid_data[tid];
2240
2241	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
2242	ba_info.status.ampdu_len = ba_notif->txed;
2243	ba_info.status.tx_time = tid_data->tx_time;
2244	ba_info.status.status_driver_data[0] =
2245		(void *)(uintptr_t)ba_notif->reduced_txp;
2246
2247	rcu_read_unlock();
2248
2249	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
2250			   tid_data->rate_n_flags, false);
2251
2252	IWL_DEBUG_TX_REPLY(mvm,
2253			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
2254			   ba_notif->sta_addr, ba_notif->sta_id);
2255
2256	IWL_DEBUG_TX_REPLY(mvm,
2257			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
2258			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
2259			   le64_to_cpu(ba_notif->bitmap), txq, index,
2260			   ba_notif->txed, ba_notif->txed_2_done);
2261
2262	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
2263			   ba_notif->reduced_txp);
2264}
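/*
 * Editor's note -- illustrative sketch, not part of this file.  The
 * struct_size() check above guards the flexible tfd[] array against a
 * truncated notification: the fixed header plus tfd_cnt entries must fit
 * inside the received payload.  Written out by hand for a hypothetical
 * layout (struct_size() additionally saturates on overflow):
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct example_ba_notif {
	uint16_t tfd_cnt;
	/* ... other fixed fields ... */
	struct { uint16_t q_num, tfd_index; } tfd[];
};

static bool example_ba_notif_fits(const struct example_ba_notif *n,
				  size_t pkt_len)
{
	/* the caller must already have checked that the fixed part fits */
	return sizeof(*n) + (size_t)n->tfd_cnt * sizeof(n->tfd[0]) <= pkt_len;
}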
2265
2266/*
2267 * Note that there are transports that buffer frames before they reach
2268 * the firmware. This means that after flush_tx_path is called, the
2269 * queue might not be empty. The race-free way to handle this is to:
2270 * 1) set the station as draining
2271 * 2) flush the Tx path
2272 * 3) wait for the transport queues to be empty
2273 */
2274int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk)
2275{
2276	int ret;
2277	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
2278		.queues_ctl = cpu_to_le32(tfd_msk),
2279		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
2280	};
2281
2282	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2283	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
2284				   sizeof(flush_cmd), &flush_cmd);
2285	if (ret)
2286		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2287	return ret;
2288}
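/*
 * Editor's note -- illustrative sketch, not part of this file.  The
 * comment above iwl_mvm_flush_tx_path() describes the race-free drain
 * order: 1) mark the station as draining so no new frames are queued,
 * 2) flush the Tx path, 3) wait for the transport queues to empty.  A
 * standalone model with hypothetical stand-in helpers:
 */
#include <stdbool.h>

struct example_sta { bool draining; };

static int example_flush_fw_queues(struct example_sta *sta) { (void)sta; return 0; }
static int example_wait_queues_empty(struct example_sta *sta) { (void)sta; return 0; }

static int example_drain_sta(struct example_sta *sta)
{
	int ret;

	sta->draining = true;			/* 1) stop accepting new Tx */
	ret = example_flush_fw_queues(sta);	/* 2) flush the Tx path */
	if (ret)
		return ret;
	return example_wait_queues_empty(sta);	/* 3) wait until empty */
}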
2289
2290int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
2291{
2292	int ret;
2293	struct iwl_tx_path_flush_cmd_rsp *rsp;
2294	struct iwl_tx_path_flush_cmd flush_cmd = {
2295		.sta_id = cpu_to_le32(sta_id),
2296		.tid_mask = cpu_to_le16(tids),
2297	};
2298
2299	struct iwl_host_cmd cmd = {
2300		.id = TXPATH_FLUSH,
2301		.len = { sizeof(flush_cmd), },
2302		.data = { &flush_cmd, },
2303	};
2304
2305	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2306
2307	if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
2308		cmd.flags |= CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
2309
2310	IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
2311			    sta_id, tids);
2312
2313	ret = iwl_mvm_send_cmd(mvm, &cmd);
2314
2315	if (ret) {
2316		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2317		return ret;
2318	}
2319
2320	if (cmd.flags & CMD_WANT_SKB) {
2321		int i;
2322		int num_flushed_queues;
2323
2324		if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) {
2325			ret = -EIO;
2326			goto free_rsp;
2327		}
2328
2329		rsp = (void *)cmd.resp_pkt->data;
2330
2331		if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id,
2332			      "sta_id %d != rsp_sta_id %d",
2333			      sta_id, le16_to_cpu(rsp->sta_id))) {
2334			ret = -EIO;
2335			goto free_rsp;
2336		}
2337
2338		num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
2339		if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
2340			      "num_flushed_queues %d", num_flushed_queues)) {
2341			ret = -EIO;
2342			goto free_rsp;
2343		}
2344
2345		for (i = 0; i < num_flushed_queues; i++) {
2346			struct ieee80211_tx_info tx_info = {};
2347			struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
2348			int tid = le16_to_cpu(queue_info->tid);
2349			int read_before = le16_to_cpu(queue_info->read_before_flush);
2350			int read_after = le16_to_cpu(queue_info->read_after_flush);
2351			int queue_num = le16_to_cpu(queue_info->queue_num);
2352
2353			if (tid == IWL_MGMT_TID)
2354				tid = IWL_MAX_TID_COUNT;
2355
2356			IWL_DEBUG_TX_QUEUES(mvm,
2357					    "tid %d queue_id %d read-before %d read-after %d\n",
2358					    tid, queue_num, read_before, read_after);
2359
2360			iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after,
2361					   &tx_info, 0, true);
2362		}
2363free_rsp:
2364		iwl_free_resp(&cmd);
2365	}
2366	return ret;
2367}
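/*
 * Editor's note -- illustrative sketch, not part of this file.  The
 * CMD_WANT_SKB path above follows the usual synchronous-command pattern:
 * send, validate the response length before reading any field, parse,
 * then free the response.  A standalone model with hypothetical types:
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct example_flush_rsp { uint16_t sta_id; uint16_t num_flushed_queues; };

static int example_parse_flush_rsp(const void *payload, size_t payload_len,
				   struct example_flush_rsp *out)
{
	/* reject short (truncated) responses before touching the fields */
	if (payload_len < sizeof(*out))
		return -1;
	memcpy(out, payload, sizeof(*out));
	return 0;
}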
2368
2369int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask)
2370{
2371	if (iwl_mvm_has_new_tx_api(mvm))
2372		return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff);
2373
2374	return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask);
2375}
   1/******************************************************************************
   2 *
   3 * This file is provided under a dual BSD/GPLv2 license.  When using or
   4 * redistributing this file, you may do so under either license.
   5 *
   6 * GPL LICENSE SUMMARY
   7 *
   8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
   9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  11 * Copyright(c) 2018 - 2019 Intel Corporation
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of version 2 of the GNU General Public License as
  15 * published by the Free Software Foundation.
  16 *
  17 * This program is distributed in the hope that it will be useful, but
  18 * WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  20 * General Public License for more details.
  21 *
  22 * The full GNU General Public License is included in this distribution
  23 * in the file called COPYING.
  24 *
  25 * Contact Information:
  26 *  Intel Linux Wireless <linuxwifi@intel.com>
  27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  28 *
  29 * BSD LICENSE
  30 *
  31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  34 * Copyright(c) 2018 - 2019 Intel Corporation
  35 * All rights reserved.
  36 *
  37 * Redistribution and use in source and binary forms, with or without
  38 * modification, are permitted provided that the following conditions
  39 * are met:
  40 *
  41 *  * Redistributions of source code must retain the above copyright
  42 *    notice, this list of conditions and the following disclaimer.
  43 *  * Redistributions in binary form must reproduce the above copyright
  44 *    notice, this list of conditions and the following disclaimer in
  45 *    the documentation and/or other materials provided with the
  46 *    distribution.
  47 *  * Neither the name Intel Corporation nor the names of its
  48 *    contributors may be used to endorse or promote products derived
  49 *    from this software without specific prior written permission.
  50 *
  51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  62 *
  63 *****************************************************************************/
  64#include <linux/ieee80211.h>
  65#include <linux/etherdevice.h>
  66#include <linux/tcp.h>
 
  67#include <net/ip.h>
  68#include <net/ipv6.h>
  69
  70#include "iwl-trans.h"
  71#include "iwl-eeprom-parse.h"
  72#include "mvm.h"
  73#include "sta.h"
 
  74
  75static void
  76iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
  77			  u16 tid, u16 ssn)
  78{
  79	struct iwl_fw_dbg_trigger_tlv *trig;
  80	struct iwl_fw_dbg_trigger_ba *ba_trig;
  81
  82	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
  83	if (!trig)
  84		return;
  85
  86	ba_trig = (void *)trig->data;
  87
  88	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
  89		return;
  90
  91	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
  92				"BAR sent to %pM, tid %d, ssn %d",
  93				addr, tid, ssn);
  94}
  95
  96#define OPT_HDR(type, skb, off) \
  97	(type *)(skb_network_header(skb) + (off))
  98
  99static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
 100			   struct ieee80211_hdr *hdr,
 101			   struct ieee80211_tx_info *info,
 102			   u16 offload_assist)
 103{
 
 
 
 104#if IS_ENABLED(CONFIG_INET)
 105	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
 106	u8 protocol = 0;
 107
 108	/*
 109	 * Do not compute checksum if already computed or if transport will
 110	 * compute it
 111	 */
 112	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
 113		goto out;
 114
 115	/* We do not expect to be requested to csum stuff we do not support */
 116	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
 117		      (skb->protocol != htons(ETH_P_IP) &&
 118		       skb->protocol != htons(ETH_P_IPV6)),
 119		      "No support for requested checksum\n")) {
 120		skb_checksum_help(skb);
 121		goto out;
 122	}
 123
 124	if (skb->protocol == htons(ETH_P_IP)) {
 125		protocol = ip_hdr(skb)->protocol;
 126	} else {
 127#if IS_ENABLED(CONFIG_IPV6)
 128		struct ipv6hdr *ipv6h =
 129			(struct ipv6hdr *)skb_network_header(skb);
 130		unsigned int off = sizeof(*ipv6h);
 131
 132		protocol = ipv6h->nexthdr;
 133		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
 134			struct ipv6_opt_hdr *hp;
 135
 136			/* only supported extension headers */
 137			if (protocol != NEXTHDR_ROUTING &&
 138			    protocol != NEXTHDR_HOP &&
 139			    protocol != NEXTHDR_DEST) {
 140				skb_checksum_help(skb);
 141				goto out;
 142			}
 143
 144			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
 145			protocol = hp->nexthdr;
 146			off += ipv6_optlen(hp);
 147		}
 148		/* if we get here - protocol now should be TCP/UDP */
 149#endif
 150	}
 151
 152	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
 153		WARN_ON_ONCE(1);
 154		skb_checksum_help(skb);
 155		goto out;
 156	}
 157
 158	/* enable L4 csum */
 159	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
 160
 161	/*
 162	 * Set offset to IP header (snap).
 163	 * We don't support tunneling so no need to take care of inner header.
 164	 * Size is in words.
 165	 */
 166	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
 167
 168	/* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */
 169	if (skb->protocol == htons(ETH_P_IP) &&
 170	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
 171		ip_hdr(skb)->check = 0;
 172		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
 173	}
 174
 175	/* reset UDP/TCP header csum */
 176	if (protocol == IPPROTO_TCP)
 177		tcp_hdr(skb)->check = 0;
 178	else
 179		udp_hdr(skb)->check = 0;
 180
 
 
 181	/*
 182	 * mac header len should include IV, size is in words unless
 183	 * the IV is added by the firmware like in WEP.
 184	 * In new Tx API, the IV is always added by the firmware.
 185	 */
 186	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
 187	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
 188	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
 189		mh_len += info->control.hw_key->iv_len;
 190	mh_len /= 2;
 191	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
 192
 193out:
 194#endif
 
 
 
 
 195	return offload_assist;
 196}
 197
 198/*
 199 * Sets most of the Tx cmd's fields
 200 */
 201void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 202			struct iwl_tx_cmd *tx_cmd,
 203			struct ieee80211_tx_info *info, u8 sta_id)
 204{
 205	struct ieee80211_hdr *hdr = (void *)skb->data;
 206	__le16 fc = hdr->frame_control;
 207	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
 208	u32 len = skb->len + FCS_LEN;
 209	u16 offload_assist = 0;
 210	u8 ac;
 211
 212	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
 213	    (ieee80211_is_probe_resp(fc) &&
 214	     !is_multicast_ether_addr(hdr->addr1)))
 215		tx_flags |= TX_CMD_FLG_ACK;
 216	else
 217		tx_flags &= ~TX_CMD_FLG_ACK;
 218
 219	if (ieee80211_is_probe_resp(fc))
 220		tx_flags |= TX_CMD_FLG_TSF;
 221
 222	if (ieee80211_has_morefrags(fc))
 223		tx_flags |= TX_CMD_FLG_MORE_FRAG;
 224
 225	if (ieee80211_is_data_qos(fc)) {
 226		u8 *qc = ieee80211_get_qos_ctl(hdr);
 227		tx_cmd->tid_tspec = qc[0] & 0xf;
 228		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
 229		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
 230			offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
 231	} else if (ieee80211_is_back_req(fc)) {
 232		struct ieee80211_bar *bar = (void *)skb->data;
 233		u16 control = le16_to_cpu(bar->control);
 234		u16 ssn = le16_to_cpu(bar->start_seq_num);
 235
 236		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 237		tx_cmd->tid_tspec = (control &
 238				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
 239			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
 240		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
 241		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
 242					  ssn);
 243	} else {
 244		if (ieee80211_is_data(fc))
 245			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
 246		else
 247			tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;
 248
 249		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
 250			tx_flags |= TX_CMD_FLG_SEQ_CTL;
 251		else
 252			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
 253	}
 254
 255	/* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */
 256	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
 257		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
 258	else
 259		ac = tid_to_mac80211_ac[0];
 260
 261	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
 262			TX_CMD_FLG_BT_PRIO_POS;
 263
 264	if (ieee80211_is_mgmt(fc)) {
 265		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
 266			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
 267		else if (ieee80211_is_action(fc))
 268			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 269		else
 270			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 271
 272		/* The spec allows Action frames in A-MPDU, we don't support
 273		 * it
 274		 */
 275		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
 276	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
 277		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 278	} else {
 279		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 280	}
 281
 282	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
 283	    !is_multicast_ether_addr(hdr->addr1))
 284		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
 285
 286	if (fw_has_capa(&mvm->fw->ucode_capa,
 287			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
 288	    ieee80211_action_contains_tpc(skb))
 289		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
 290
 291	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
 292	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
 293	tx_cmd->len = cpu_to_le16((u16)skb->len);
 294	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
 295	tx_cmd->sta_id = sta_id;
 296
 297	/* padding is inserted later in transport */
 298	if (ieee80211_hdrlen(fc) % 4 &&
 299	    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
 300		offload_assist |= BIT(TX_CMD_OFFLD_PAD);
 301
 302	tx_cmd->offload_assist |=
 303		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
 304					    offload_assist));
 305}
 306
 307static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
 308			      struct ieee80211_tx_info *info,
 309			      struct ieee80211_sta *sta, __le16 fc)
 310{
 311	if (info->band == NL80211_BAND_2GHZ &&
 312	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
 313		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
 314
 315	if (sta && ieee80211_is_data(fc)) {
 316		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 317
 318		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
 319	}
 320
 321	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
 322}
 323
 324static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
 325			       struct ieee80211_tx_info *info,
 326			       struct ieee80211_sta *sta)
 327{
 328	int rate_idx;
 329	u8 rate_plcp;
 330	u32 rate_flags = 0;
 331
 332	/* HT rate doesn't make sense for a non data frame */
 333	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
 334		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
 335		  info->control.rates[0].flags,
 336		  info->control.rates[0].idx);
 337
 338	rate_idx = info->control.rates[0].idx;
 339	/* if the rate isn't a well known legacy rate, take the lowest one */
 340	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
 341		rate_idx = rate_lowest_index(
 342				&mvm->nvm_data->bands[info->band], sta);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 343
 344	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
 345	if (info->band == NL80211_BAND_5GHZ)
 346		rate_idx += IWL_FIRST_OFDM_RATE;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 347
 348	/* For 2.4 GHZ band, check that there is no need to remap */
 349	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 350
 351	/* Get PLCP rate for tx_cmd->rate_n_flags */
 352	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
 
 
 
 
 
 
 
 
 353
 354	/* Set CCK flag as needed */
 355	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
 356		rate_flags |= RATE_MCS_CCK_MSK;
 
 
 
 357
 358	return (u32)rate_plcp | rate_flags;
 
 
 
 
 359}
 360
 361static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
 362				       struct ieee80211_tx_info *info,
 363				       struct ieee80211_sta *sta, __le16 fc)
 364{
 365	return iwl_mvm_get_tx_rate(mvm, info, sta) |
 
 
 
 366		iwl_mvm_get_tx_ant(mvm, info, sta, fc);
 367}
 368
 369/*
 370 * Sets the fields in the Tx cmd that are rate related
 371 */
 372void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 373			    struct ieee80211_tx_info *info,
 374			    struct ieee80211_sta *sta, __le16 fc)
 375{
 376	/* Set retry limit on RTS packets */
 377	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
 378
 379	/* Set retry limit on DATA packets and Probe Responses*/
 380	if (ieee80211_is_probe_resp(fc)) {
 381		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
 382		tx_cmd->rts_retry_limit =
 383			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
 384	} else if (ieee80211_is_back_req(fc)) {
 385		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
 386	} else {
 387		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
 388	}
 389
 390	/*
 391	 * for data packets, rate info comes from the table inside the fw. This
 392	 * table is controlled by LINK_QUALITY commands
 393	 */
 394
 395	if (ieee80211_is_data(fc) && sta) {
 
 396		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 397
 398		if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
 399			tx_cmd->initial_rate_index = 0;
 400			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
 401			return;
 402		}
 403	} else if (ieee80211_is_back_req(fc)) {
 404		tx_cmd->tx_flags |=
 405			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
 406	}
 407
 408	/* Set the rate in the TX cmd */
 409	tx_cmd->rate_n_flags =
 410		cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
 411}
 412
 413static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
 414					 u8 *crypto_hdr)
 415{
 416	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 417	u64 pn;
 418
 419	pn = atomic64_inc_return(&keyconf->tx_pn);
 420	crypto_hdr[0] = pn;
 421	crypto_hdr[2] = 0;
 422	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
 423	crypto_hdr[1] = pn >> 8;
 424	crypto_hdr[4] = pn >> 16;
 425	crypto_hdr[5] = pn >> 24;
 426	crypto_hdr[6] = pn >> 32;
 427	crypto_hdr[7] = pn >> 40;
 428}
 429
 430/*
 431 * Sets the fields in the Tx cmd that are crypto related
 432 */
 433static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 434				      struct ieee80211_tx_info *info,
 435				      struct iwl_tx_cmd *tx_cmd,
 436				      struct sk_buff *skb_frag,
 437				      int hdrlen)
 438{
 439	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 440	u8 *crypto_hdr = skb_frag->data + hdrlen;
 441	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
 442	u64 pn;
 443
 444	switch (keyconf->cipher) {
 445	case WLAN_CIPHER_SUITE_CCMP:
 446		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
 447		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 448		break;
 449
 450	case WLAN_CIPHER_SUITE_TKIP:
 451		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
 452		pn = atomic64_inc_return(&keyconf->tx_pn);
 453		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
 454		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
 455		break;
 456
 457	case WLAN_CIPHER_SUITE_WEP104:
 458		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
 459		/* fall through */
 460	case WLAN_CIPHER_SUITE_WEP40:
 461		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
 462			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
 463			  TX_CMD_SEC_WEP_KEY_IDX_MSK);
 464
 465		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
 466		break;
 467	case WLAN_CIPHER_SUITE_GCMP:
 468	case WLAN_CIPHER_SUITE_GCMP_256:
 469		type = TX_CMD_SEC_GCMP;
 470		/* Fall through */
 471	case WLAN_CIPHER_SUITE_CCMP_256:
 472		/* TODO: Taking the key from the table might introduce a race
 473		 * when PTK rekeying is done, having an old packets with a PN
 474		 * based on the old key but the message encrypted with a new
 475		 * one.
 476		 * Need to handle this.
 477		 */
 478		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
 479		tx_cmd->key[0] = keyconf->hw_key_idx;
 480		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 481		break;
 482	default:
 483		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
 484	}
 485}
 486
 
 
 
 
 
 
 
 
 
 
 487/*
 488 * Allocates and sets the Tx cmd the driver data pointers in the skb
 489 */
 490static struct iwl_device_cmd *
 491iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 492		      struct ieee80211_tx_info *info, int hdrlen,
 493		      struct ieee80211_sta *sta, u8 sta_id)
 
 494{
 495	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 496	struct iwl_device_cmd *dev_cmd;
 497	struct iwl_tx_cmd *tx_cmd;
 498
 499	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
 500
 501	if (unlikely(!dev_cmd))
 502		return NULL;
 503
 504	/* Make sure we zero enough of dev_cmd */
 505	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
 506	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
 507
 508	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
 509	dev_cmd->hdr.cmd = TX_CMD;
 510
 511	if (iwl_mvm_has_new_tx_api(mvm)) {
 512		u16 offload_assist = 0;
 513		u32 rate_n_flags = 0;
 514		u16 flags = 0;
 515		struct iwl_mvm_sta *mvmsta = sta ?
 516			iwl_mvm_sta_from_mac80211(sta) : NULL;
 
 517
 518		if (ieee80211_is_data_qos(hdr->frame_control)) {
 519			u8 *qc = ieee80211_get_qos_ctl(hdr);
 520
 521			if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
 522				offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
 523		}
 524
 525		offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
 526						 offload_assist);
 527
 528		/* padding is inserted later in transport */
 529		if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
 530		    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
 531			offload_assist |= BIT(TX_CMD_OFFLD_PAD);
 532
 533		if (!info->control.hw_key)
 534			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
 535
 536		/*
 537		 * For data packets rate info comes from the fw. Only
 538		 * set rate/antenna during connection establishment or in case
 539		 * no station is given.
 540		 */
 541		if (!sta || !ieee80211_is_data(hdr->frame_control) ||
 542		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
 543			flags |= IWL_TX_FLAGS_CMD_RATE;
 544			rate_n_flags =
 545				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
 546							    hdr->frame_control);
 
 
 
 
 547		}
 548
 549		if (mvm->trans->trans_cfg->device_family >=
 550		    IWL_DEVICE_FAMILY_22560) {
 551			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
 
 
 552
 553			cmd->offload_assist |= cpu_to_le32(offload_assist);
 554
 555			/* Total # bytes to be transmitted */
 556			cmd->len = cpu_to_le16((u16)skb->len);
 557
 558			/* Copy MAC header from skb into command buffer */
 559			memcpy(cmd->hdr, hdr, hdrlen);
 560
 561			cmd->flags = cpu_to_le16(flags);
 562			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
 563		} else {
 564			struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
 
 
 565
 566			cmd->offload_assist |= cpu_to_le16(offload_assist);
 567
 568			/* Total # bytes to be transmitted */
 569			cmd->len = cpu_to_le16((u16)skb->len);
 570
 571			/* Copy MAC header from skb into command buffer */
 572			memcpy(cmd->hdr, hdr, hdrlen);
 573
 574			cmd->flags = cpu_to_le32(flags);
 575			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
 576		}
 577		goto out;
 578	}
 579
 580	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 581
 582	if (info->control.hw_key)
 583		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
 584
 585	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
 586
 587	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
 588
 589	/* Copy MAC header from skb into command buffer */
 590	memcpy(tx_cmd->hdr, hdr, hdrlen);
 591
 592out:
 593	return dev_cmd;
 594}
 595
 596static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
 597				       struct iwl_device_cmd *cmd)
 598{
 599	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 600
 601	memset(&skb_info->status, 0, sizeof(skb_info->status));
 602	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
 603
 604	skb_info->driver_data[1] = cmd;
 605}
 606
 607static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
 
 608				      struct ieee80211_tx_info *info,
 609				      struct ieee80211_hdr *hdr)
 610{
 611	struct iwl_mvm_vif *mvmvif =
 612		iwl_mvm_vif_from_mac80211(info->control.vif);
 613	__le16 fc = hdr->frame_control;
 614
 615	switch (info->control.vif->type) {
 616	case NL80211_IFTYPE_AP:
 617	case NL80211_IFTYPE_ADHOC:
 618		/*
 619		 * Non-bufferable frames use the broadcast station, thus they
 620		 * use the probe queue.
 621		 * Also take care of the case where we send a deauth to a
 622		 * station that we don't have, or similarly an association
 623		 * response (with non-success status) for a station we can't
 624		 * accept.
 625		 * Also, disassociate frames might happen, particular with
 626		 * reason 7 ("Class 3 frame received from nonassociated STA").
 627		 */
 628		if (ieee80211_is_mgmt(fc) &&
 629		    (!ieee80211_is_bufferable_mmpdu(fc) ||
 630		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
 631			return mvm->probe_queue;
 632
 633		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
 634		    is_multicast_ether_addr(hdr->addr1))
 635			return mvmvif->cab_queue;
 636
 637		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
 638			  "fc=0x%02x", le16_to_cpu(fc));
 639		return mvm->probe_queue;
 640	case NL80211_IFTYPE_P2P_DEVICE:
 641		if (ieee80211_is_mgmt(fc))
 642			return mvm->p2p_dev_queue;
 643
 644		WARN_ON_ONCE(1);
 645		return mvm->p2p_dev_queue;
 646	default:
 647		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
 648		return -1;
 649	}
 650}
 651
 652static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
 653				       struct sk_buff *skb)
 654{
 655	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 656	struct iwl_mvm_vif *mvmvif =
 657		iwl_mvm_vif_from_mac80211(info->control.vif);
 658	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
 659	int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
 660	struct iwl_probe_resp_data *resp_data;
 661	u8 *ie, *pos;
 
 662	u8 match[] = {
 663		(WLAN_OUI_WFA >> 16) & 0xff,
 664		(WLAN_OUI_WFA >> 8) & 0xff,
 665		WLAN_OUI_WFA & 0xff,
 666		WLAN_OUI_TYPE_WFA_P2P,
 667	};
 668
 669	rcu_read_lock();
 670
 671	resp_data = rcu_dereference(mvmvif->probe_resp_data);
 672	if (!resp_data)
 673		goto out;
 674
 675	if (!resp_data->notif.noa_active)
 676		goto out;
 677
 678	ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
 679					  mgmt->u.probe_resp.variable,
 680					  skb->len - base_len,
 681					  match, 4, 2);
 682	if (!ie) {
 683		IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
 684		goto out;
 685	}
 686
 687	if (skb_tailroom(skb) < resp_data->noa_len) {
 688		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
 689			IWL_ERR(mvm,
 690				"Failed to reallocate probe resp\n");
 691			goto out;
 692		}
 693	}
 694
 695	pos = skb_put(skb, resp_data->noa_len);
 696
 697	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
 698	/* Set length of IE body (not including ID and length itself) */
 699	*pos++ = resp_data->noa_len - 2;
 700	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
 701	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
 702	*pos++ = WLAN_OUI_WFA & 0xff;
 703	*pos++ = WLAN_OUI_TYPE_WFA_P2P;
 704
 705	memcpy(pos, &resp_data->notif.noa_attr,
 706	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
 707
 708out:
 709	rcu_read_unlock();
 710}
 711
 712int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 713{
 714	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 715	struct ieee80211_tx_info info;
 716	struct iwl_device_cmd *dev_cmd;
 717	u8 sta_id;
 718	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 719	__le16 fc = hdr->frame_control;
 720	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
 721		IEEE80211_TX_CTL_TX_OFFCHAN;
 722	int queue = -1;
 723
 724	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
 725		return -1;
 726
 727	memcpy(&info, skb->cb, sizeof(info));
 728
 729	if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
 730		return -1;
 731
 732	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
 733		return -1;
 734
 735	if (info.control.vif) {
 736		struct iwl_mvm_vif *mvmvif =
 737			iwl_mvm_vif_from_mac80211(info.control.vif);
 738
 739		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
 740		    info.control.vif->type == NL80211_IFTYPE_AP ||
 741		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 742			if (!ieee80211_is_data(hdr->frame_control))
 743				sta_id = mvmvif->bcast_sta.sta_id;
 744			else
 745				sta_id = mvmvif->mcast_sta.sta_id;
 746
 747			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
 
 748		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
 749			queue = mvm->snif_queue;
 750			sta_id = mvm->snif_sta.sta_id;
 751		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
 752			   offchannel) {
 753			/*
 754			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
 755			 * that can be used in 2 different types of vifs, P2P &
 756			 * STATION.
 757			 * P2P uses the offchannel queue.
 758			 * STATION (HS2.0) uses the auxiliary context of the FW,
 759			 * and hence needs to be sent on the aux queue.
 760			 */
 761			sta_id = mvm->aux_sta.sta_id;
 762			queue = mvm->aux_queue;
 763		}
 764	}
 765
 766	if (queue < 0) {
 767		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
 768		return -1;
 769	}
 770
 771	if (unlikely(ieee80211_is_probe_resp(fc)))
 772		iwl_mvm_probe_resp_set_noa(mvm, skb);
 773
 774	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
 775
 776	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
 
 777	if (!dev_cmd)
 778		return -1;
 779
 780	/* From now on, we cannot access info->control */
 781	iwl_mvm_skb_prepare_status(skb, dev_cmd);
 782
 783	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
 784		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 785		return -1;
 786	}
 787
 788	return 0;
 789}
 790
 791unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
 792				    struct ieee80211_sta *sta, unsigned int tid)
 793{
 794	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 795	enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
 796	u8 ac = tid_to_mac80211_ac[tid];
 
 797	unsigned int txf;
 798	int lmac = IWL_LMAC_24G_INDEX;
 799
 800	if (iwl_mvm_is_cdb_supported(mvm) &&
 801	    band == NL80211_BAND_5GHZ)
 802		lmac = IWL_LMAC_5G_INDEX;
 803
 804	/* For HE redirect to trigger based fifos */
 805	if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
 806		ac += 4;
 807
 808	txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
 809
 810	/*
 811	 * Don't send an AMSDU that will be longer than the TXF.
 812	 * Add a security margin of 256 for the TX command + headers.
 813	 * We also want to have the start of the next packet inside the
 814	 * fifo to be able to send bursts.
 815	 */
 816	return min_t(unsigned int, mvmsta->max_amsdu_len,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 817		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
 818}
 819
 820#ifdef CONFIG_INET
 821
 822static int
 823iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
 824		       netdev_features_t netdev_flags,
 825		       struct sk_buff_head *mpdus_skb)
 826{
 827	struct sk_buff *tmp, *next;
 828	struct ieee80211_hdr *hdr = (void *)skb->data;
 829	char cb[sizeof(skb->cb)];
 830	u16 i = 0;
 831	unsigned int tcp_payload_len;
 832	unsigned int mss = skb_shinfo(skb)->gso_size;
 833	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
 834	bool qos = ieee80211_is_data_qos(hdr->frame_control);
 835	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
 836
 837	skb_shinfo(skb)->gso_size = num_subframes * mss;
 838	memcpy(cb, skb->cb, sizeof(cb));
 839
 840	next = skb_gso_segment(skb, netdev_flags);
 841	skb_shinfo(skb)->gso_size = mss;
 
 842	if (WARN_ON_ONCE(IS_ERR(next)))
 843		return -EINVAL;
 844	else if (next)
 845		consume_skb(skb);
 846
 847	while (next) {
 848		tmp = next;
 849		next = tmp->next;
 850
 851		memcpy(tmp->cb, cb, sizeof(tmp->cb));
 852		/*
 853		 * Compute the length of all the data added for the A-MSDU.
 854		 * This will be used to compute the length to write in the TX
 855		 * command. We have: SNAP + IP + TCP for n -1 subframes and
 856		 * ETH header for n subframes.
 857		 */
 858		tcp_payload_len = skb_tail_pointer(tmp) -
 859			skb_transport_header(tmp) -
 860			tcp_hdrlen(tmp) + tmp->data_len;
 861
 862		if (ipv4)
 863			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
 864
 865		if (tcp_payload_len > mss) {
 866			skb_shinfo(tmp)->gso_size = mss;
 
 
 867		} else {
 868			if (qos) {
 869				u8 *qc;
 870
 871				if (ipv4)
 872					ip_send_check(ip_hdr(tmp));
 873
 874				qc = ieee80211_get_qos_ctl((void *)tmp->data);
 875				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
 876			}
 877			skb_shinfo(tmp)->gso_size = 0;
 878		}
 879
 880		tmp->prev = NULL;
 881		tmp->next = NULL;
 882
 883		__skb_queue_tail(mpdus_skb, tmp);
 884		i++;
 885	}
 886
 887	return 0;
 888}
 889
 890static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 891			  struct ieee80211_tx_info *info,
 892			  struct ieee80211_sta *sta,
 893			  struct sk_buff_head *mpdus_skb)
 894{
 895	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 896	struct ieee80211_hdr *hdr = (void *)skb->data;
 897	unsigned int mss = skb_shinfo(skb)->gso_size;
 898	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
 899	u16 snap_ip_tcp, pad;
 900	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
 901	u8 tid;
 902
 903	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
 904		tcp_hdrlen(skb);
 905
 906	if (!mvmsta->max_amsdu_len ||
 907	    !ieee80211_is_data_qos(hdr->frame_control) ||
 908	    !mvmsta->amsdu_enabled)
 909		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 910
 911	/*
 912	 * Do not build AMSDU for IPv6 with extension headers.
 913	 * ask stack to segment and checkum the generated MPDUs for us.
 914	 */
 915	if (skb->protocol == htons(ETH_P_IPV6) &&
 916	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
 917	    IPPROTO_TCP) {
 918		netdev_flags &= ~NETIF_F_CSUM_MASK;
 919		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 920	}
 921
 922	tid = ieee80211_get_tid(hdr);
 923	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
 924		return -EINVAL;
 925
 926	/*
 927	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
 928	 * during an BA session.
 929	 */
 930	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
 931	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
 932		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 933
 934	if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
 935	    !(mvmsta->amsdu_enabled & BIT(tid)))
 936		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
 937
 938	max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
 
 
 
 
 
 939
 940	/*
 941	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
 942	 * supported. This is a spec requirement (IEEE 802.11-2015
 943	 * section 8.7.3 NOTE 3).
 944	 */
 945	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
 946	    !sta->vht_cap.vht_supported)
 947		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
 948
 949	/* Sub frame header + SNAP + IP header + TCP header + MSS */
 950	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
 951	pad = (4 - subf_len) & 0x3;
 952
 953	/*
 954	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
 955	 * N * subf_len + (N - 1) * pad.
 956	 */
 957	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
 958
 959	if (sta->max_amsdu_subframes &&
 960	    num_subframes > sta->max_amsdu_subframes)
 961		num_subframes = sta->max_amsdu_subframes;
 962
 963	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
 964		tcp_hdrlen(skb) + skb->data_len;
 965
 966	/*
 967	 * Make sure we have enough TBs for the A-MSDU:
 968	 *	2 for each subframe
 969	 *	1 more for each fragment
 970	 *	1 more for the potential data in the header
 971	 */
 972	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
 973	    mvm->trans->max_skb_frags)
 974		num_subframes = 1;
 975
 976	if (num_subframes > 1)
 977		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
 978
 979	/* This skb fits in one single A-MSDU */
 980	if (num_subframes * mss >= tcp_payload_len) {
 981		__skb_queue_tail(mpdus_skb, skb);
 982		return 0;
 983	}
 984
 985	/*
 986	 * Trick the segmentation function to make it
 987	 * create SKBs that can fit into one A-MSDU.
 988	 */
 989	return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
 990				      mpdus_skb);
 991}
 992#else /* CONFIG_INET */
 993static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 994			  struct ieee80211_tx_info *info,
 995			  struct ieee80211_sta *sta,
 996			  struct sk_buff_head *mpdus_skb)
 997{
 998	/* Impossible to get TSO with CONFIG_INET */
 999	WARN_ON(1);
1000
1001	return -1;
1002}
1003#endif
1004
1005/* Check if there are any timed-out TIDs on a given shared TXQ */
1006static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
1007{
1008	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
1009	unsigned long now = jiffies;
1010	int tid;
1011
1012	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1013		return false;
1014
1015	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1016		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
1017				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1018			return true;
1019	}
1020
1021	return false;
1022}
1023
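/*
 * Account the airtime used by a transmitted frame towards the traffic load
 * (TCM) statistics of the MAC this station belongs to, and kick the TCM work
 * if the current measurement period has elapsed.
 */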
1024static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
1025			       struct iwl_mvm_sta *mvmsta,
1026			       int airtime)
1027{
1028	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1029	struct iwl_mvm_tcm_mac *mdata;
1030
1031	if (mac >= NUM_MAC_INDEX_DRIVER)
1032		return;
1033
1034	mdata = &mvm->tcm.data[mac];
1035
1036	if (mvm->tcm.paused)
1037		return;
1038
1039	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
1040		schedule_delayed_work(&mvm->tcm.work, 0);
1041
1042	mdata->tx.airtime += airtime;
1043}
1044
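/*
 * Count a packet queued on the given TID towards the per-AC Tx packet
 * counters used by the traffic load monitor (TCM).
 */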
1045static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
1046				 struct iwl_mvm_sta *mvmsta, int tid)
1047{
1048	u32 ac = tid_to_mac80211_ac[tid];
1049	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1050	struct iwl_mvm_tcm_mac *mdata;
1051
1052	if (mac >= NUM_MAC_INDEX_DRIVER)
1053		return -EINVAL;
1054
1055	mdata = &mvm->tcm.data[mac];
1056
1057	mdata->tx.pkts[ac]++;
1058
1059	return 0;
1060}
1061
1062/*
 1063	 * Build the Tx command for a single MPDU and hand it to the transport.
1064 *
1065 * This function must be called with BHs disabled.
1066 */
1067static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
1068			   struct ieee80211_tx_info *info,
1069			   struct ieee80211_sta *sta)
1070{
1071	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1072	struct iwl_mvm_sta *mvmsta;
1073	struct iwl_device_cmd *dev_cmd;
1074	__le16 fc;
1075	u16 seq_number = 0;
1076	u8 tid = IWL_MAX_TID_COUNT;
1077	u16 txq_id;
1078	bool is_ampdu = false;
1079	int hdrlen;
1080
1081	mvmsta = iwl_mvm_sta_from_mac80211(sta);
1082	fc = hdr->frame_control;
1083	hdrlen = ieee80211_hdrlen(fc);
1084
1085	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
1086		return -1;
1087
1088	if (WARN_ON_ONCE(!mvmsta))
1089		return -1;
1090
1091	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
1092		return -1;
1093
1094	if (unlikely(ieee80211_is_probe_resp(fc)))
1095		iwl_mvm_probe_resp_set_noa(mvm, skb);
1096
1097	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
1098					sta, mvmsta->sta_id);
1099	if (!dev_cmd)
1100		goto drop;
1101
1102	/*
 1103	 * We handle EOSP signalling entirely ourselves -- for uAPSD the firmware
1104	 * will always send a notification, and for PS-Poll responses
1105	 * we'll notify mac80211 when getting frame status
1106	 */
1107	info->flags &= ~IEEE80211_TX_STATUS_EOSP;
1108
1109	spin_lock(&mvmsta->lock);
1110
 1111	/* Nullfunc frames should go to the MGMT queue regardless of QoS;
 1112	 * the !ieee80211_is_qos_nullfunc(fc) condition keeps the default
 1113	 * assignment of the MGMT TID.
1114	 */
1115	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
1116		tid = ieee80211_get_tid(hdr);
1117		if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid))
1118			goto drop_unlock_sta;
1119
1120		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
1121		if (WARN_ONCE(is_ampdu &&
1122			      mvmsta->tid_data[tid].state != IWL_AGG_ON,
1123			      "Invalid internal agg state %d for TID %d",
1124			       mvmsta->tid_data[tid].state, tid))
1125			goto drop_unlock_sta;
1126
1127		seq_number = mvmsta->tid_data[tid].seq_number;
1128		seq_number &= IEEE80211_SCTL_SEQ;
1129
1130		if (!iwl_mvm_has_new_tx_api(mvm)) {
1131			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1132
1133			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1134			hdr->seq_ctrl |= cpu_to_le16(seq_number);
1135			/* update the tx_cmd hdr as it was already copied */
1136			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
1137		}
1138	} else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
1139		tid = IWL_TID_NON_QOS;
1140	}
1141
1142	txq_id = mvmsta->tid_data[tid].txq_id;
1143
1144	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1145
1146	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
1147		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1148		spin_unlock(&mvmsta->lock);
1149		return 0;
1150	}
1151
1152	if (!iwl_mvm_has_new_tx_api(mvm)) {
1153		/* Keep track of the time of the last frame for this RA/TID */
1154		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
1155
1156		/*
 1157		 * If we have timed-out TIDs, schedule the worker that will
 1158		 * reconfigure the queues and update them.
 1159		 *
 1160		 * Note that no lock is taken here in order not to serialize
 1161		 * the TX flow. This isn't dangerous because scheduling
 1162		 * mvm->add_stream_wk can't ruin the state, and if we DON'T
 1163		 * schedule it due to some race condition then the next TX that
 1164		 * gets here will.
1165		 */
1166		if (unlikely(mvm->queue_info[txq_id].status ==
1167			     IWL_MVM_QUEUE_SHARED &&
1168			     iwl_mvm_txq_should_update(mvm, txq_id)))
1169			schedule_work(&mvm->add_stream_wk);
1170	}
1171
1172	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n",
1173		     mvmsta->sta_id, tid, txq_id,
1174		     IEEE80211_SEQ_TO_SN(seq_number), skb->len);
1175
1176	/* From now on, we cannot access info->control */
1177	iwl_mvm_skb_prepare_status(skb, dev_cmd);
1178
1179	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
1180		goto drop_unlock_sta;
1181
1182	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
1183		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
1184
1185	spin_unlock(&mvmsta->lock);
1186
1187	if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
1188				  tid == IWL_MAX_TID_COUNT ? 0 : tid))
1189		goto drop;
1190
1191	return 0;
1192
1193drop_unlock_sta:
1194	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1195	spin_unlock(&mvmsta->lock);
1196drop:
1197	IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid);
1198	return -1;
1199}
1200
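/*
 * Transmit an skb to a station: non-GSO skbs (and GSO skbs that fit in a
 * single MPDU) are sent directly via iwl_mvm_tx_mpdu(); larger GSO skbs are
 * first split by iwl_mvm_tx_tso() and the resulting MPDUs are sent in turn.
 */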
1201int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
1202		   struct ieee80211_sta *sta)
1203{
1204	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1205	struct ieee80211_tx_info info;
1206	struct sk_buff_head mpdus_skbs;
1207	unsigned int payload_len;
1208	int ret;
1209
1210	if (WARN_ON_ONCE(!mvmsta))
1211		return -1;
1212
1213	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
1214		return -1;
1215
1216	memcpy(&info, skb->cb, sizeof(info));
1217
1218	if (!skb_is_gso(skb))
1219		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
1220
1221	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1222		tcp_hdrlen(skb) + skb->data_len;
1223
1224	if (payload_len <= skb_shinfo(skb)->gso_size)
1225		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
1226
1227	__skb_queue_head_init(&mpdus_skbs);
1228
1229	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
1230	if (ret)
1231		return ret;
1232
1233	if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
1234		return ret;
1235
1236	while (!skb_queue_empty(&mpdus_skbs)) {
1237		skb = __skb_dequeue(&mpdus_skbs);
1238
1239		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
1240		if (ret) {
1241			__skb_queue_purge(&mpdus_skbs);
1242			return ret;
1243		}
1244	}
1245
1246	return 0;
1247}
1248
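/*
 * Called (with the station lock held) after frames were reclaimed for a
 * RA/TID: if the corresponding queue has drained, tell mac80211 and, when an
 * ADDBA/DELBA flow was waiting for the queue to empty, move that flow forward.
 */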
1249static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
1250				      struct ieee80211_sta *sta, u8 tid)
1251{
1252	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1253	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1254	struct ieee80211_vif *vif = mvmsta->vif;
1255	u16 normalized_ssn;
1256
1257	lockdep_assert_held(&mvmsta->lock);
1258
1259	if ((tid_data->state == IWL_AGG_ON ||
1260	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
1261	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
1262		/*
 1263		 * Now that this aggregation or DQA queue is empty, tell
 1264		 * mac80211 so it knows we no longer have frames buffered for
 1265		 * the station on this TID (for the TIM bitmap calculation).
1266		 */
1267		ieee80211_sta_set_buffered(sta, tid, false);
1268	}
1269
1270	/*
 1271	 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we need
 1272	 * to align the wrap-around of the ssn so that we compare relevant values.
1273	 */
1274	normalized_ssn = tid_data->ssn;
1275	if (mvm->trans->trans_cfg->gen2)
1276		normalized_ssn &= 0xff;
1277
1278	if (normalized_ssn != tid_data->next_reclaimed)
1279		return;
1280
1281	switch (tid_data->state) {
1282	case IWL_EMPTYING_HW_QUEUE_ADDBA:
1283		IWL_DEBUG_TX_QUEUES(mvm,
1284				    "Can continue addBA flow ssn = next_recl = %d\n",
1285				    tid_data->next_reclaimed);
1286		tid_data->state = IWL_AGG_STARTING;
1287		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1288		break;
1289
1290	case IWL_EMPTYING_HW_QUEUE_DELBA:
1291		IWL_DEBUG_TX_QUEUES(mvm,
1292				    "Can continue DELBA flow ssn = next_recl = %d\n",
1293				    tid_data->next_reclaimed);
1294		tid_data->state = IWL_AGG_OFF;
1295		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1296		break;
1297
1298	default:
1299		break;
1300	}
1301}
1302
1303#ifdef CONFIG_IWLWIFI_DEBUG
1304const char *iwl_mvm_get_tx_fail_reason(u32 status)
1305{
1306#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1307#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1308
1309	switch (status & TX_STATUS_MSK) {
1310	case TX_STATUS_SUCCESS:
1311		return "SUCCESS";
1312	TX_STATUS_POSTPONE(DELAY);
1313	TX_STATUS_POSTPONE(FEW_BYTES);
1314	TX_STATUS_POSTPONE(BT_PRIO);
1315	TX_STATUS_POSTPONE(QUIET_PERIOD);
1316	TX_STATUS_POSTPONE(CALC_TTAK);
1317	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1318	TX_STATUS_FAIL(SHORT_LIMIT);
1319	TX_STATUS_FAIL(LONG_LIMIT);
1320	TX_STATUS_FAIL(UNDERRUN);
1321	TX_STATUS_FAIL(DRAIN_FLOW);
1322	TX_STATUS_FAIL(RFKILL_FLUSH);
1323	TX_STATUS_FAIL(LIFE_EXPIRE);
1324	TX_STATUS_FAIL(DEST_PS);
1325	TX_STATUS_FAIL(HOST_ABORTED);
1326	TX_STATUS_FAIL(BT_RETRY);
1327	TX_STATUS_FAIL(STA_INVALID);
1328	TX_STATUS_FAIL(FRAG_DROPPED);
1329	TX_STATUS_FAIL(TID_DISABLE);
1330	TX_STATUS_FAIL(FIFO_FLUSHED);
1331	TX_STATUS_FAIL(SMALL_CF_POLL);
1332	TX_STATUS_FAIL(FW_DROP);
1333	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
1334	}
1335
1336	return "UNKNOWN";
1337
1338#undef TX_STATUS_FAIL
1339#undef TX_STATUS_POSTPONE
1340}
1341#endif /* CONFIG_IWLWIFI_DEBUG */
1342
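/*
 * Translate a firmware rate_n_flags value into the mac80211 ieee80211_tx_rate
 * representation (channel width, guard interval, HT/VHT MCS or legacy index).
 */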
1343void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
1344			       enum nl80211_band band,
1345			       struct ieee80211_tx_rate *r)
1346{
1347	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
1348		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1349	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
1350	case RATE_MCS_CHAN_WIDTH_20:
1351		break;
1352	case RATE_MCS_CHAN_WIDTH_40:
1353		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1354		break;
1355	case RATE_MCS_CHAN_WIDTH_80:
1356		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
1357		break;
1358	case RATE_MCS_CHAN_WIDTH_160:
1359		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
1360		break;
1361	}
1362	if (rate_n_flags & RATE_MCS_SGI_MSK)
1363		r->flags |= IEEE80211_TX_RC_SHORT_GI;
1364	if (rate_n_flags & RATE_MCS_HT_MSK) {
1365		r->flags |= IEEE80211_TX_RC_MCS;
1366		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
1367	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
1368		ieee80211_rate_set_vht(
1369			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
1370			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1371						RATE_VHT_MCS_NSS_POS) + 1);
1372		r->flags |= IEEE80211_TX_RC_VHT_MCS;
1373	} else {
1374		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
1375							     band);
1376	}
1377}
1378
1379/**
 1380	 * iwl_mvm_hwrate_to_tx_status - translate ucode response to mac80211 tx status control values
1381 */
1382static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
1383					struct ieee80211_tx_info *info)
1384{
1385	struct ieee80211_tx_rate *r = &info->status.rates[0];
1386
1387	info->status.antenna =
1388		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1389	iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
1390}
1391
1392static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
1393					    u32 status)
1394{
1395	struct iwl_fw_dbg_trigger_tlv *trig;
1396	struct iwl_fw_dbg_trigger_tx_status *status_trig;
1397	int i;
1398
1399	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
1400				     FW_DBG_TRIGGER_TX_STATUS);
1401	if (!trig)
1402		return;
1403
1404	status_trig = (void *)trig->data;
1405
1406	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
1407		/* don't collect on status 0 */
1408		if (!status_trig->statuses[i].status)
1409			break;
1410
1411		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
1412			continue;
1413
1414		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1415					"Tx status %d was received",
1416					status & TX_STATUS_MSK);
1417		break;
1418	}
1419}
1420
1421/**
1422 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
1423 * @tx_resp: the Tx response from the fw (agg or non-agg)
1424 *
1425 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
1426 * it can't know that everything will go well until the end of the AMPDU, it
1427 * can't know in advance the number of MPDUs that will be sent in the current
1428 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
1429 * Hence, it can't know in advance what the SSN of the SCD will be at the end
1430 * of the batch. This is why the SSN of the SCD is written at the end of the
1431 * whole struct at a variable offset. This function knows how to cope with the
1432 * variable offset and returns the SSN of the SCD.
1433 */
1434static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
1435				      struct iwl_mvm_tx_resp *tx_resp)
1436{
1437	return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
1438			    tx_resp->frame_count) & 0xfff;
1439}
1440
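/*
 * Handle a Tx response for a single (non-aggregated) frame: reclaim the
 * transmitted skbs up to the SCD SSN, translate the firmware status into
 * mac80211 tx_info flags and report every freed frame back to mac80211.
 */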
1441static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1442				     struct iwl_rx_packet *pkt)
1443{
1444	struct ieee80211_sta *sta;
1445	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1446	int txq_id = SEQ_TO_QUEUE(sequence);
1447	/* struct iwl_mvm_tx_resp_v3 is almost the same */
1448	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1449	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1450	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1451	struct agg_tx_status *agg_status =
1452		iwl_mvm_get_agg_status(mvm, tx_resp);
1453	u32 status = le16_to_cpu(agg_status->status);
1454	u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
1455	struct sk_buff_head skbs;
1456	u8 skb_freed = 0;
1457	u8 lq_color;
1458	u16 next_reclaimed, seq_ctl;
1459	bool is_ndp = false;
1460
1461	__skb_queue_head_init(&skbs);
1462
1463	if (iwl_mvm_has_new_tx_api(mvm))
1464		txq_id = le16_to_cpu(tx_resp->tx_queue);
1465
1466	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
1467
 1468	/* we can free frames up to ssn % q.n_bd, not inclusive */
1469	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
1470
1471	while (!skb_queue_empty(&skbs)) {
1472		struct sk_buff *skb = __skb_dequeue(&skbs);
1473		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1474		struct ieee80211_hdr *hdr = (void *)skb->data;
1475		bool flushed = false;
1476
1477		skb_freed++;
1478
1479		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1480
1481		memset(&info->status, 0, sizeof(info->status));
1482
1483		/* inform mac80211 about what happened with the frame */
1484		switch (status & TX_STATUS_MSK) {
1485		case TX_STATUS_SUCCESS:
1486		case TX_STATUS_DIRECT_DONE:
1487			info->flags |= IEEE80211_TX_STAT_ACK;
1488			break;
1489		case TX_STATUS_FAIL_FIFO_FLUSHED:
1490		case TX_STATUS_FAIL_DRAIN_FLOW:
1491			flushed = true;
1492			break;
1493		case TX_STATUS_FAIL_DEST_PS:
1494			/* the FW should have stopped the queue and not
 1495			 * returned this status
1496			 */
1497			WARN_ON(1);
1498			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1499			break;
1500		default:
1501			break;
1502		}
1503
1504		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
1505		    ieee80211_is_mgmt(hdr->frame_control))
1506			iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
1507
1508		/*
1509		 * If we are freeing multiple frames, mark all the frames
1510		 * but the first one as acked, since they were acknowledged
 1511		 * before.
 1512		 */
1513		if (skb_freed > 1)
1514			info->flags |= IEEE80211_TX_STAT_ACK;
1515
1516		iwl_mvm_tx_status_check_trigger(mvm, status);
1517
1518		info->status.rates[0].count = tx_resp->failure_frame + 1;
1519		iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
1520					    info);
1521		info->status.status_driver_data[1] =
1522			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
1523
1524		/* Single frame failure in an AMPDU queue => send BAR */
1525		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1526		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
1527		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
1528			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1529		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1530
 1531		/* Work around FW bug: seq_ctl is wrong upon failure / BAR frame */
1532		if (ieee80211_is_back_req(hdr->frame_control))
1533			seq_ctl = 0;
1534		else if (status != TX_STATUS_SUCCESS)
1535			seq_ctl = le16_to_cpu(hdr->seq_ctrl);
1536
1537		if (unlikely(!seq_ctl)) {
1538			struct ieee80211_hdr *hdr = (void *)skb->data;
1539
1540			/*
 1541			 * If it is an NDP, we can't update next_reclaimed since
 1542			 * its sequence control is 0. Note that for that same
 1543			 * reason, NDPs are never sent to A-MPDU'able queues
 1544			 * so that we can never have more than one freed frame
 1545			 * for a single Tx response (see WARN_ON below).
1546			 */
1547			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1548				is_ndp = true;
1549		}
1550
1551		/*
1552		 * TODO: this is not accurate if we are freeing more than one
1553		 * packet.
1554		 */
1555		info->status.tx_time =
1556			le16_to_cpu(tx_resp->wireless_media_time);
1557		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
1558		lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1559		info->status.status_driver_data[0] =
1560			RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
1561
1562		ieee80211_tx_status(mvm->hw, skb);
1563	}
1564
1565	/* This is an aggregation queue or might become one, so we use
1566	 * the ssn since: ssn = wifi seq_num % 256.
1567	 * The seq_ctl is the sequence control of the packet to which
1568	 * this Tx response relates. But if there is a hole in the
 1569	 * bitmap of the BA we received, this Tx response may allow us to
1570	 * reclaim the hole and all the subsequent packets that were
1571	 * already acked. In that case, seq_ctl != ssn, and the next
1572	 * packet to be reclaimed will be ssn and not seq_ctl. In that
1573	 * case, several packets will be reclaimed even if
1574	 * frame_count = 1.
1575	 *
 1576	 * The ssn is the index (% 256) of the latest packet that has been
 1577	 * treated (acked / dropped) + 1.
1578	 */
1579	next_reclaimed = ssn;
1580
1581	IWL_DEBUG_TX_REPLY(mvm,
1582			   "TXQ %d status %s (0x%08x)\n",
1583			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);
1584
1585	IWL_DEBUG_TX_REPLY(mvm,
1586			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
1587			   le32_to_cpu(tx_resp->initial_rate),
1588			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
1589			   ssn, next_reclaimed, seq_ctl);
1590
1591	rcu_read_lock();
1592
1593	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1594	/*
 1595	 * sta can't be NULL; otherwise it'd mean that the sta has been freed in
1596	 * the firmware while we still have packets for it in the Tx queues.
1597	 */
1598	if (WARN_ON_ONCE(!sta))
1599		goto out;
1600
1601	if (!IS_ERR(sta)) {
1602		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1603
1604		iwl_mvm_tx_airtime(mvm, mvmsta,
1605				   le16_to_cpu(tx_resp->wireless_media_time));
1606
1607		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
1608		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
1609			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
1610
1611		if (sta->wme && tid != IWL_MGMT_TID) {
1612			struct iwl_mvm_tid_data *tid_data =
1613				&mvmsta->tid_data[tid];
1614			bool send_eosp_ndp = false;
1615
1616			spin_lock_bh(&mvmsta->lock);
1617
1618			if (!is_ndp) {
1619				tid_data->next_reclaimed = next_reclaimed;
1620				IWL_DEBUG_TX_REPLY(mvm,
1621						   "Next reclaimed packet:%d\n",
1622						   next_reclaimed);
1623			} else {
1624				IWL_DEBUG_TX_REPLY(mvm,
1625						   "NDP - don't update next_reclaimed\n");
1626			}
1627
1628			iwl_mvm_check_ratid_empty(mvm, sta, tid);
1629
1630			if (mvmsta->sleep_tx_count) {
1631				mvmsta->sleep_tx_count--;
1632				if (mvmsta->sleep_tx_count &&
1633				    !iwl_mvm_tid_queued(mvm, tid_data)) {
1634					/*
1635					 * The number of frames in the queue
 1636					 * dropped to 0 even if we sent fewer
1637					 * frames than we thought we had on the
1638					 * Tx queue.
1639					 * This means we had holes in the BA
1640					 * window that we just filled, ask
1641					 * mac80211 to send EOSP since the
1642					 * firmware won't know how to do that.
1643					 * Send NDP and the firmware will send
1644					 * EOSP notification that will trigger
1645					 * a call to ieee80211_sta_eosp().
1646					 */
1647					send_eosp_ndp = true;
1648				}
1649			}
1650
1651			spin_unlock_bh(&mvmsta->lock);
1652			if (send_eosp_ndp) {
1653				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
1654					IEEE80211_FRAME_RELEASE_UAPSD,
1655					1, tid, false, false);
1656				mvmsta->sleep_tx_count = 0;
1657				ieee80211_send_eosp_nullfunc(sta, tid);
1658			}
1659		}
1660
1661		if (mvmsta->next_status_eosp) {
1662			mvmsta->next_status_eosp = false;
1663			ieee80211_sta_eosp(sta);
1664		}
1665	}
1666out:
1667	rcu_read_unlock();
1668}
1669
1670#ifdef CONFIG_IWLWIFI_DEBUG
1671#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
1672static const char *iwl_get_agg_tx_status(u16 status)
1673{
1674	switch (status & AGG_TX_STATE_STATUS_MSK) {
1675	AGG_TX_STATE_(TRANSMITTED);
1676	AGG_TX_STATE_(UNDERRUN);
1677	AGG_TX_STATE_(BT_PRIO);
1678	AGG_TX_STATE_(FEW_BYTES);
1679	AGG_TX_STATE_(ABORT);
1680	AGG_TX_STATE_(TX_ON_AIR_DROP);
1681	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
1682	AGG_TX_STATE_(LAST_SENT_BT_KILL);
1683	AGG_TX_STATE_(SCD_QUERY);
1684	AGG_TX_STATE_(TEST_BAD_CRC32);
1685	AGG_TX_STATE_(RESPONSE);
1686	AGG_TX_STATE_(DUMP_TX);
1687	AGG_TX_STATE_(DELAY_TX);
1688	}
1689
1690	return "UNKNOWN";
1691}
1692
1693static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1694				      struct iwl_rx_packet *pkt)
1695{
1696	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1697	struct agg_tx_status *frame_status =
1698		iwl_mvm_get_agg_status(mvm, tx_resp);
1699	int i;
1700
1701	for (i = 0; i < tx_resp->frame_count; i++) {
1702		u16 fstatus = le16_to_cpu(frame_status[i].status);
1703
1704		IWL_DEBUG_TX_REPLY(mvm,
1705				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
1706				   iwl_get_agg_tx_status(fstatus),
1707				   fstatus & AGG_TX_STATE_STATUS_MSK,
1708				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
1709					AGG_TX_STATE_TRY_CNT_POS,
1710				   le16_to_cpu(frame_status[i].sequence));
1711	}
1712}
1713#else
1714static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1715				      struct iwl_rx_packet *pkt)
1716{}
1717#endif /* CONFIG_IWLWIFI_DEBUG */
1718
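/*
 * Handle a Tx response for an aggregated batch: the frames themselves are
 * reclaimed only when the BA notification arrives, so here we just cache the
 * rate, airtime and LQ color per TID and account the airtime used.
 */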
1719static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1720				  struct iwl_rx_packet *pkt)
1721{
1722	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1723	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1724	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1725	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1726	struct iwl_mvm_sta *mvmsta;
1727	int queue = SEQ_TO_QUEUE(sequence);
1728	struct ieee80211_sta *sta;
1729
1730	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
1731			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
1732		return;
1733
1734	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
1735
1736	rcu_read_lock();
1737
1738	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1739
1740	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1741	if (WARN_ON_ONCE(!sta || !sta->wme)) {
1742		rcu_read_unlock();
1743		return;
1744	}
1745
1746	if (!WARN_ON_ONCE(!mvmsta)) {
1747		mvmsta->tid_data[tid].rate_n_flags =
1748			le32_to_cpu(tx_resp->initial_rate);
1749		mvmsta->tid_data[tid].tx_time =
1750			le16_to_cpu(tx_resp->wireless_media_time);
1751		mvmsta->tid_data[tid].lq_color =
1752			TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1753		iwl_mvm_tx_airtime(mvm, mvmsta,
1754				   le16_to_cpu(tx_resp->wireless_media_time));
1755	}
1756
1757	rcu_read_unlock();
1758}
1759
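/* Dispatch a TX_CMD response to the single-frame or aggregation handler. */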
1760void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1761{
1762	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1763	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1764
1765	if (tx_resp->frame_count == 1)
1766		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
1767	else
1768		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
1769}
1770
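/*
 * Reclaim all frames up to @index on @txq following a BA notification, mark
 * them as acked and hand them back to mac80211 together with the rate-scaling
 * data carried in @ba_info.
 */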
1771static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
1772			       int txq, int index,
1773			       struct ieee80211_tx_info *ba_info, u32 rate)
1774{
1775	struct sk_buff_head reclaimed_skbs;
1776	struct iwl_mvm_tid_data *tid_data;
1777	struct ieee80211_sta *sta;
1778	struct iwl_mvm_sta *mvmsta;
1779	struct sk_buff *skb;
1780	int freed;
1781
1782	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
1783		      tid > IWL_MAX_TID_COUNT,
1784		      "sta_id %d tid %d", sta_id, tid))
1785		return;
1786
1787	rcu_read_lock();
1788
1789	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1790
 1791	/* Reclaiming frames for a station that has been deleted? */
1792	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
1793		rcu_read_unlock();
1794		return;
1795	}
1796
1797	mvmsta = iwl_mvm_sta_from_mac80211(sta);
1798	tid_data = &mvmsta->tid_data[tid];
1799
1800	if (tid_data->txq_id != txq) {
1801		IWL_ERR(mvm,
1802			"invalid BA notification: Q %d, tid %d\n",
1803			tid_data->txq_id, tid);
1804		rcu_read_unlock();
1805		return;
1806	}
1807
1808	__skb_queue_head_init(&reclaimed_skbs);
1809
1810	/*
1811	 * Release all TFDs before the SSN, i.e. all TFDs in front of
1812	 * block-ack window (we assume that they've been successfully
1813	 * transmitted ... if not, it's too late anyway).
1814	 */
1815	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
1816
1817	spin_lock_bh(&mvmsta->lock);
1818
1819	tid_data->next_reclaimed = index;
1820
1821	iwl_mvm_check_ratid_empty(mvm, sta, tid);
1822
1823	freed = 0;
1824
 1825	/* pack the lq color from tid_data along with the reduced txp */
1826	ba_info->status.status_driver_data[0] =
1827		RS_DRV_DATA_PACK(tid_data->lq_color,
1828				 ba_info->status.status_driver_data[0]);
1829	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
1830
1831	skb_queue_walk(&reclaimed_skbs, skb) {
1832		struct ieee80211_hdr *hdr = (void *)skb->data;
1833		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1834
1835		if (ieee80211_is_data_qos(hdr->frame_control))
1836			freed++;
1837		else
1838			WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
1839
1840		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1841
1842		memset(&info->status, 0, sizeof(info->status));
 1843		/* Packet was transmitted successfully; failures come as single
 1844		 * frames because, before failing a frame, the firmware transmits
1845		 * it without aggregation at least once.
1846		 */
1847		info->flags |= IEEE80211_TX_STAT_ACK;
1848
1849		/* this is the first skb we deliver in this batch */
1850		/* put the rate scaling data there */
1851		if (freed == 1) {
1852			info->flags |= IEEE80211_TX_STAT_AMPDU;
1853			memcpy(&info->status, &ba_info->status,
1854			       sizeof(ba_info->status));
1855			iwl_mvm_hwrate_to_tx_status(rate, info);
1856		}
1857	}
1858
1859	spin_unlock_bh(&mvmsta->lock);
1860
 1861	/* We got a BA notif with 0 acked or the scd_ssn didn't progress, which is
 1862	 * possible (i.e. the first MPDU in the aggregation wasn't acked).
 1863	 * Still, it's important to update RS about sent vs. acked.
1864	 */
1865	if (skb_queue_empty(&reclaimed_skbs)) {
1866		struct ieee80211_chanctx_conf *chanctx_conf = NULL;
1867
1868		if (mvmsta->vif)
1869			chanctx_conf =
1870				rcu_dereference(mvmsta->vif->chanctx_conf);
1871
1872		if (WARN_ON_ONCE(!chanctx_conf))
1873			goto out;
1874
1875		ba_info->band = chanctx_conf->def.chan->band;
1876		iwl_mvm_hwrate_to_tx_status(rate, ba_info);
1877
1878		if (!iwl_mvm_has_tlc_offload(mvm)) {
1879			IWL_DEBUG_TX_REPLY(mvm,
1880					   "No reclaim. Update rs directly\n");
1881			iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
1882		}
1883	}
1884
1885out:
1886	rcu_read_unlock();
1887
1888	while (!skb_queue_empty(&reclaimed_skbs)) {
1889		skb = __skb_dequeue(&reclaimed_skbs);
1890		ieee80211_tx_status(mvm->hw, skb);
1891	}
1892}
1893
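/*
 * Handle a BA notification from the firmware. On devices using the new Tx API
 * this is a compressed notification that may cover several queues/TIDs;
 * otherwise it is the legacy per-TID notification. Either way the acked
 * frames are reclaimed through iwl_mvm_tx_reclaim().
 */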
1894void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1895{
1896	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1897	int sta_id, tid, txq, index;
1898	struct ieee80211_tx_info ba_info = {};
1899	struct iwl_mvm_ba_notif *ba_notif;
1900	struct iwl_mvm_tid_data *tid_data;
1901	struct iwl_mvm_sta *mvmsta;
1902
1903	ba_info.flags = IEEE80211_TX_STAT_AMPDU;
1904
1905	if (iwl_mvm_has_new_tx_api(mvm)) {
1906		struct iwl_mvm_compressed_ba_notif *ba_res =
1907			(void *)pkt->data;
1908		u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
1909		int i;
1910
1911		sta_id = ba_res->sta_id;
1912		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
1913		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
1914		ba_info.status.tx_time =
1915			(u16)le32_to_cpu(ba_res->wireless_time);
1916		ba_info.status.status_driver_data[0] =
1917			(void *)(uintptr_t)ba_res->reduced_txp;
1918
1919		if (!le16_to_cpu(ba_res->tfd_cnt))
1920			goto out;
1921
1922		rcu_read_lock();
1923
1924		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1925		if (!mvmsta)
1926			goto out_unlock;
1927
1928		/* Free per TID */
1929		for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
1930			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
1931				&ba_res->tfd[i];
1932
1933			tid = ba_tfd->tid;
1934			if (tid == IWL_MGMT_TID)
1935				tid = IWL_MAX_TID_COUNT;
1936
1937			mvmsta->tid_data[i].lq_color = lq_color;
1938			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
1939					   (int)(le16_to_cpu(ba_tfd->q_num)),
1940					   le16_to_cpu(ba_tfd->tfd_index),
1941					   &ba_info,
1942					   le32_to_cpu(ba_res->tx_rate));
1943		}
1944
1945		iwl_mvm_tx_airtime(mvm, mvmsta,
1946				   le32_to_cpu(ba_res->wireless_time));
1947out_unlock:
1948		rcu_read_unlock();
1949out:
1950		IWL_DEBUG_TX_REPLY(mvm,
1951				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
1952				   sta_id, le32_to_cpu(ba_res->flags),
1953				   le16_to_cpu(ba_res->txed),
1954				   le16_to_cpu(ba_res->done));
1955		return;
1956	}
1957
1958	ba_notif = (void *)pkt->data;
1959	sta_id = ba_notif->sta_id;
1960	tid = ba_notif->tid;
1961	/* "flow" corresponds to Tx queue */
1962	txq = le16_to_cpu(ba_notif->scd_flow);
1963	/* "ssn" is start of block-ack Tx window, corresponds to index
1964	 * (in Tx queue's circular buffer) of first TFD/frame in window */
1965	index = le16_to_cpu(ba_notif->scd_ssn);
1966
1967	rcu_read_lock();
1968	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1969	if (WARN_ON_ONCE(!mvmsta)) {
1970		rcu_read_unlock();
1971		return;
1972	}
1973
1974	tid_data = &mvmsta->tid_data[tid];
1975
1976	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
1977	ba_info.status.ampdu_len = ba_notif->txed;
1978	ba_info.status.tx_time = tid_data->tx_time;
1979	ba_info.status.status_driver_data[0] =
1980		(void *)(uintptr_t)ba_notif->reduced_txp;
1981
1982	rcu_read_unlock();
1983
1984	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
1985			   tid_data->rate_n_flags);
1986
1987	IWL_DEBUG_TX_REPLY(mvm,
1988			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
1989			   ba_notif->sta_addr, ba_notif->sta_id);
1990
1991	IWL_DEBUG_TX_REPLY(mvm,
1992			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
1993			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
1994			   le64_to_cpu(ba_notif->bitmap), txq, index,
1995			   ba_notif->txed, ba_notif->txed_2_done);
1996
1997	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
1998			   ba_notif->reduced_txp);
1999}
2000
2001/*
2002 * Note that there are transports that buffer frames before they reach
2003 * the firmware. This means that after flush_tx_path is called, the
2004 * queue might not be empty. The race-free way to handle this is to:
2005 * 1) set the station as draining
2006 * 2) flush the Tx path
2007 * 3) wait for the transport queues to be empty
2008 */
2009int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
2010{
2011	int ret;
2012	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
2013		.queues_ctl = cpu_to_le32(tfd_msk),
2014		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
2015	};
2016
2017	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2018
2019	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
2020				   sizeof(flush_cmd), &flush_cmd);
2021	if (ret)
2022		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2023	return ret;
2024}
2025
2026int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
2027			   u16 tids, u32 flags)
2028{
2029	int ret;
2030	struct iwl_tx_path_flush_cmd flush_cmd = {
2031		.sta_id = cpu_to_le32(sta_id),
2032		.tid_mask = cpu_to_le16(tids),
2033	};
2034
2035	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2036
2037	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
2038				   sizeof(flush_cmd), &flush_cmd);
2039	if (ret)
2040		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2041	return ret;
2042}
2043
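/*
 * Flush all pending frames of a station: per-TID flush on devices using the
 * new Tx API, TFD-queue-mask based flush otherwise.
 */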
2044int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
2045{
2046	struct iwl_mvm_int_sta *int_sta = sta;
2047	struct iwl_mvm_sta *mvm_sta = sta;
2048
2049	BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
2050		     offsetof(struct iwl_mvm_sta, sta_id));
2051
2052	if (iwl_mvm_has_new_tx_api(mvm))
2053		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
2054					      0xff | BIT(IWL_MGMT_TID), flags);
2055
2056	if (internal)
2057		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
2058					     flags);
2059
2060	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
2061}