// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_INVALID_STA;
}
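
/*
 * Usage sketch (illustrative only, not an actual caller): an ID is
 * allocated under mvm->mutex and exhaustion must be handled, e.g.:
 *
 *	sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif));
 *	if (sta_id == IWL_INVALID_STA)
 *		return -ENOSPC;
 */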

/* Calculate the ampdu density and max size */
u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
			       struct ieee80211_bss_conf *link_conf,
			       u32 *_agg_size)
{
	u32 agg_size = 0, mpdu_dens = 0;

	if (WARN_ON(!link_sta))
		return 0;

	/* Note that we always use only legacy & highest supported PPDUs, so
	 * of Draft P802.11be D3.0 Table 10-12a ("Fields used for calculating
	 * the maximum A-MPDU size of various PPDU types in different bands"),
	 * we only need to worry about the highest supported PPDU type here.
	 */

	if (link_sta->ht_cap.ht_supported) {
		agg_size = link_sta->ht_cap.ampdu_factor;
		mpdu_dens = link_sta->ht_cap.ampdu_density;
	}

	if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
		/* overwrite HT values on 6 GHz */
		mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (link_sta->vht_cap.vht_supported) {
		/* if VHT supported overwrite HT value */
		agg_size = u32_get_bits(link_sta->vht_cap.cap,
					IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
	}

	/* D6.0 10.12.2 A-MPDU length limit rules:
	 * A STA indicates the maximum length of the A-MPDU preEOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum A-MPDU Length Exponent Extension field in its HE
	 * Capabilities element.
	 */
	if (link_sta->he_cap.has_he)
		agg_size +=
			u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
				    IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	if (link_sta->eht_cap.has_eht)
		agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
					IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);

	/* Limit to max A-MPDU supported by FW */
	agg_size = min_t(u32, agg_size,
			 STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);

	*_agg_size = agg_size;
	return mpdu_dens;
}
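
/*
 * Worked example (illustrative): a VHT peer advertising the maximum
 * A-MPDU length exponent of 7 gives agg_size = 7; if the peer is also
 * HE with a MAX_AMPDU_LEN_EXP extension of 2, agg_size = 9. Under the
 * usual 2^(13 + exp) - 1 octet encoding that is about 4 MB, and the
 * min_t() above clamps anything larger to the firmware's 4M limit.
 */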

u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta)
{
	u8 uapsd_acs = 0;

	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
		uapsd_acs |= BIT(AC_BK);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
		uapsd_acs |= BIT(AC_BE);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
		uapsd_acs |= BIT(AC_VI);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
		uapsd_acs |= BIT(AC_VO);

	return uapsd_acs | uapsd_acs << 4;
}
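
/*
 * Illustrative note (assumption about the firmware field layout): the
 * returned byte mirrors the same AC mask into both nibbles, which carry
 * the trigger-enabled and delivery-enabled AC bitmaps. E.g. a station
 * with U-APSD on AC_VO only yields BIT(AC_VO) | BIT(AC_VO) << 4.
 */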

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->deflink.sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->deflink.ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->deflink.rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->deflink.smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->deflink.ht_cap.ht_supported ||
	    mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

	mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink,
					       &mvm_sta->vif->bss_conf,
					       &agg_size);
	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
		add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta);
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

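	/*
	 * Re-arm unless more than twice the negotiated BA timeout (in TUs;
	 * 1 TU = 1024 usec) has passed since the last RX, e.g. a 5000 TU
	 * timeout gives a ~10.24 s expiry window.
	 */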
	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts, which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop the
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(sta))
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int sta_id, u16 *queueptr, u8 tid)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (mvm->sta_remove_requires_queue_remove) {
			u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
					     SCD_QUEUE_CONFIG_CMD);
			struct iwl_scd_queue_cfg_cmd remove_cmd = {
				.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
				.u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
			};

			if (tid == IWL_MAX_TID_COUNT)
				tid = IWL_MGMT_TID;

			remove_cmd.u.remove.tid = cpu_to_le32(tid);

			ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
						   sizeof(remove_cmd),
						   &remove_cmd);
		} else {
			ret = 0;
		}

		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return ret;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these values */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	/* Regardless of whether this was a reserved TXQ for a STA, mark it as not reserved */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */
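	/*
	 * Example (illustrative): if the station already has BK and VI
	 * DATA queues and the new stream is AC_VO, rule 3 picks the VI
	 * queue; with only a BK queue, the BK queue would be shared.
	 */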

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
				int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
		 "Trying to reconfig unallocated queue %d\n", queue))
		return -ENXIO;

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * the redirection in such a case; if no redirection is required it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than the current one - the FIFO needs to be
	 * redirected to the lowest one of the streams in the queue. Check if
	 * this is needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK has
	 * value 3 and VO has value 0; to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than that
	 * of Y.
	 */
 
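	/*
	 * E.g. with mac80211's numbering (VO=0, VI=1, BE=2, BK=3): a queue
	 * currently marked AC_BE (2) getting a new AC_BK (3) TID gives
	 * 3 <= 2 == false, so the queue is redirected to the BK FIFO.
	 */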
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark the queue as shared in the transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no other indication there of
	 * shared queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
{
	int max_size = IWL_DEFAULT_QUEUE_SIZE;
	unsigned int link_id;

	/* this queue isn't used for traffic (cab_queue) */
	if (!sta)
		return IWL_MGMT_QUEUE_SIZE;

	rcu_read_lock();

	for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
		struct ieee80211_link_sta *link =
			rcu_dereference(sta->link[link_id]);

		if (!link)
			continue;

		/* support for 512 BA size */
		if (link->eht_cap.has_eht &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;

		/* support for 256 BA size */
		if (link->he_cap.has_he &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_HE)
			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
	}

	rcu_read_unlock();
	return max_size;
}
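
/*
 * Example (illustrative): an MLD station with one HE link and one EHT
 * link ends up with the EHT queue size, since the loop only ever raises
 * max_size and EHT (512-frame BA window) requires the largest queue.
 */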

int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size;
	u32 sta_mask = 0;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	} else {
		size = iwl_mvm_get_queue_size(sta);
	}

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct ieee80211_link_sta *link_sta;
		unsigned int link_id;

		rcu_read_lock();
		for_each_sta_active_link(mvmsta->vif, sta, link_sta, link_id) {
			struct iwl_mvm_link_sta *link =
				rcu_dereference_protected(mvmsta->link[link_id],
							  lockdep_is_held(&mvm->mutex));

			if (!link)
				continue;

			sta_mask |= BIT(link->sta_id);
		}
		rcu_read_unlock();
	} else {
		sta_mask |= BIT(sta_id);
	}

	if (!sta_mask)
		return -EINVAL;

	queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
				    tid, size, timeout);

	if (queue >= 0)
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
				    queue, sta_mask, tid);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->deflink.sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id,
					tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->deflink.sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	unsigned int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->deflink.sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable. Mark all DATA queues as allowed to aggregate at
	 * some point.
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->deflink.sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark the queue as shared in the transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no other indication there of
	 * shared queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);

	return ret;
}

int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
			     struct ieee80211_txq *txq)
{
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
	int ret = -EINVAL;

	lockdep_assert_held(&mvm->mutex);

	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
	    !txq->sta) {
		return 0;
	}

	if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		ret = 0;
	}

	local_bh_disable();
	spin_lock(&mvm->add_stream_lock);
	if (!list_empty(&mvmtxq->list))
		list_del_init(&mvmtxq->list);
	spin_unlock(&mvm->add_stream_lock);
	local_bh_enable();

	return ret;
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here, but if this fails we can't
		 * transmit anyway - so just don't transmit the frame etc.
		 * and let them back up ... we've tried our best to allocate
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			spin_lock_bh(&mvm->add_stream_lock);
			list_del_init(&mvmtxq->list);
			spin_unlock_bh(&mvm->add_stream_lock);
			continue;
		}

		/* now we're ready, any remaining races/concurrency will be
		 * handled in iwl_mvm_mac_itxq_xmit()
		 */
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);

		local_bh_disable();
		spin_lock(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		spin_unlock(&mvm->add_stream_lock);

		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->deflink.sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
					  struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->deflink.sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->deflink.sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta,
							 mvm_sta->deflink.sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so the driver needs to update it
			 * internally as well to keep it in sync with the real
			 * value.
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->deflink.sta_id, i,
					    txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_mvm_has_new_station_api(mvm->fw) &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
1739
1740/* Initialize driver data of a new sta */
1741int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1742		     struct ieee80211_sta *sta, int sta_id, u8 sta_type)
1743{
1744	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1745	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1746	struct iwl_mvm_rxq_dup_data *dup_data;
1747	int i, ret = 0;
1748
1749	lockdep_assert_held(&mvm->mutex);
1750
1751	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1752						      mvmvif->color);
1753	mvm_sta->vif = vif;
1754
1755	/* For MLD, the sta_id(s) should be allocated for each link before
1756	 * calling this function
1757	 */
1758	if (!mvm->mld_api_is_used) {
1759		if (WARN_ON(sta_id == IWL_INVALID_STA))
1760			return -EINVAL;
1761
1762		mvm_sta->deflink.sta_id = sta_id;
1763		rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);
1764
1765		if (!mvm->trans->trans_cfg->gen2)
1766			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
1767				LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1768		else
1769			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
1770				LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1771	}
1772
1773	mvm_sta->tt_tx_protection = false;
1774	mvm_sta->sta_type = sta_type;
1775
1776	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1777
1778	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1779		/*
1780		 * Mark all queues for this STA as unallocated and defer TX
1781		 * frames until the queue is allocated
1782		 */
1783		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1784	}
1785
1786	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1787		struct iwl_mvm_txq *mvmtxq =
1788			iwl_mvm_txq_from_mac80211(sta->txq[i]);
1789
1790		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1791		INIT_LIST_HEAD(&mvmtxq->list);
1792		atomic_set(&mvmtxq->tx_request, 0);
1793	}
1794
1795	if (iwl_mvm_has_new_rx_api(mvm)) {
1796		int q;
1797
1798		dup_data = kcalloc(mvm->trans->num_rx_queues,
1799				   sizeof(*dup_data), GFP_KERNEL);
1800		if (!dup_data)
1801			return -ENOMEM;
1802		/*
1803		 * Initialize all the last_seq values to 0xffff which can never
1804		 * compare equal to the frame's seq_ctrl in the check in
1805		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1806		 * number and fragmented packets don't reach that function.
1807		 *
1808		 * This thus allows receiving a packet with seqno 0 and the
1809		 * retry bit set as the very first packet on a new TID.
1810		 */
1811		for (q = 0; q < mvm->trans->num_rx_queues; q++)
1812			memset(dup_data[q].last_seq, 0xff,
1813			       sizeof(dup_data[q].last_seq));
1814		mvm_sta->dup_data = dup_data;
1815	}
1816
1817	if (!iwl_mvm_has_new_tx_api(mvm)) {
1818		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1819						 ieee80211_vif_type_p2p(vif));
1820		if (ret)
1821			return ret;
1822	}
1823
1824	/*
1825	 * if rs is registered with mac80211, then "add station" will be handled
1826	 * via the corresponding ops; otherwise we need to notify rate scaling here
1827	 */
1828	if (iwl_mvm_has_tlc_offload(mvm))
1829		iwl_mvm_rs_add_sta(mvm, mvm_sta);
1830	else
1831		spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock);
1832
1833	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1834
1835	/* MPDUs are counted only when EMLSR is possible */
1836	if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
1837	    !sta->tdls && ieee80211_vif_is_mld(vif)) {
1838		mvm_sta->mpdu_counters =
1839			kcalloc(mvm->trans->num_rx_queues,
1840				sizeof(*mvm_sta->mpdu_counters),
1841				GFP_KERNEL);
1842		if (mvm_sta->mpdu_counters)
1843			for (int q = 0; q < mvm->trans->num_rx_queues; q++)
1844				spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
1845	}
1846
1847	return 0;
1848}
1849
1850int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1851		    struct ieee80211_vif *vif,
1852		    struct ieee80211_sta *sta)
1853{
1854	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1855	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1856	int ret, sta_id;
1857	bool sta_update = false;
1858	unsigned int sta_flags = 0;
1859
1860	lockdep_assert_held(&mvm->mutex);
1861
1862	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1863		sta_id = iwl_mvm_find_free_sta_id(mvm,
1864						  ieee80211_vif_type_p2p(vif));
1865	else
1866		sta_id = mvm_sta->deflink.sta_id;
1867
1868	if (sta_id == IWL_INVALID_STA)
1869		return -ENOSPC;
1870
1871	spin_lock_init(&mvm_sta->lock);
1872
1873	/* if this is a HW restart re-alloc existing queues */
1874	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1875		struct iwl_mvm_int_sta tmp_sta = {
1876			.sta_id = sta_id,
1877			.type = mvm_sta->sta_type,
1878		};
1879
1880		/* First add an empty station since allocating
1881		 * a queue requires a valid station
1882		 */
1883		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1884						 mvmvif->id, mvmvif->color);
1885		if (ret)
1886			goto err;
1887
1888		iwl_mvm_realloc_queues_after_restart(mvm, sta);
1889		sta_update = true;
1890		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1891		goto update_fw;
1892	}
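	/*
	 * Ordering note for the restart path above: the bare internal
	 * station is added first because reallocating queues requires a
	 * valid station, and the ADD_STA at update_fw below then updates
	 * it, with STA_MODIFY_QUEUES where applicable.
	 */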
1893
1894	ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id,
1895			       sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK);
1896	if (ret)
1897		goto err;
1898
1899update_fw:
1900	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1901	if (ret)
1902		goto err;
1903
1904	if (vif->type == NL80211_IFTYPE_STATION) {
1905		if (!sta->tdls) {
1906			WARN_ON(mvmvif->deflink.ap_sta_id != IWL_INVALID_STA);
1907			mvmvif->deflink.ap_sta_id = sta_id;
1908		} else {
1909			WARN_ON(mvmvif->deflink.ap_sta_id == IWL_INVALID_STA);
1910		}
1911	}
1912
1913	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1914
1915	return 0;
1916
1917err:
1918	return ret;
1919}
1920
1921int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1922		      bool drain)
1923{
1924	struct iwl_mvm_add_sta_cmd cmd = {};
1925	int ret;
1926	u32 status;
1927
1928	lockdep_assert_held(&mvm->mutex);
1929
1930	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1931	cmd.sta_id = mvmsta->deflink.sta_id;
1932	cmd.add_modify = STA_MODE_MODIFY;
1933	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1934	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1935
1936	status = ADD_STA_SUCCESS;
1937	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1938					  iwl_mvm_add_sta_cmd_size(mvm),
1939					  &cmd, &status);
1940	if (ret)
1941		return ret;
1942
1943	switch (status & IWL_ADD_STA_STATUS_MASK) {
1944	case ADD_STA_SUCCESS:
1945		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1946			       mvmsta->deflink.sta_id);
1947		break;
1948	default:
1949		ret = -EIO;
1950		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1951			mvmsta->deflink.sta_id);
1952		break;
1953	}
1954
1955	return ret;
1956}
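
/*
 * Sketch of the intended drain usage, as in iwl_mvm_rm_sta() below:
 *
 *	iwl_mvm_drain_sta(mvm, mvmsta, true);
 *	iwl_mvm_flush_sta(...);
 *	... wait for the station's TX queues to empty ...
 *	iwl_mvm_drain_sta(mvm, mvmsta, false);
 */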
1957
1958/*
1959 * Remove a station from the FW table. Before sending the command to remove
1960 * the station, validate that it is indeed known to the driver (sanity
1961 * check only).
1962 */
1963static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1964{
1965	struct ieee80211_sta *sta;
1966	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1967		.sta_id = sta_id,
1968	};
1969	int ret;
1970
1971	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1972					lockdep_is_held(&mvm->mutex));
1973
1974	/* Note: internal stations are marked as error values */
1975	if (!sta) {
1976		IWL_ERR(mvm, "Invalid station id\n");
1977		return -EINVAL;
1978	}
1979
1980	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1981				   sizeof(rm_sta_cmd), &rm_sta_cmd);
1982	if (ret) {
1983		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1984		return ret;
1985	}
1986
1987	return 0;
1988}
1989
1990static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1991				       struct ieee80211_vif *vif,
1992				       struct ieee80211_sta *sta)
1993{
1994	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1995	int i;
1996
1997	lockdep_assert_held(&mvm->mutex);
1998
1999	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
2000		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
2001			continue;
2002
2003		iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id,
2004				    &mvm_sta->tid_data[i].txq_id, i);
2005		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
2006	}
2007
2008	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
2009		struct iwl_mvm_txq *mvmtxq =
2010			iwl_mvm_txq_from_mac80211(sta->txq[i]);
2011
2012		spin_lock_bh(&mvm->add_stream_lock);
2013		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
2014		list_del_init(&mvmtxq->list);
2015		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
2016		spin_unlock_bh(&mvm->add_stream_lock);
2017	}
2018}
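
/*
 * Note: add_stream_lock is taken above so that resetting the mac80211
 * TXQs cannot race with the add-stream worker, which may still have a
 * queue on its list while the station is being torn down.
 */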
2019
2020int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
2021				  struct iwl_mvm_sta *mvm_sta)
2022{
2023	int i;
2024
2025	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
2026		u16 txq_id;
2027		int ret;
2028
2029		spin_lock_bh(&mvm_sta->lock);
2030		txq_id = mvm_sta->tid_data[i].txq_id;
2031		spin_unlock_bh(&mvm_sta->lock);
2032
2033		if (txq_id == IWL_MVM_INVALID_QUEUE)
2034			continue;
2035
2036		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
2037		if (ret)
2038			return ret;
2039	}
2040
2041	return 0;
2042}
2043
2044/* Execute the common part for both MLD and non-MLD modes.
2045 * Returns true if we're done removing the station, either
2046 * with error or success.
2047 */
2048bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2049		     struct ieee80211_sta *sta,
2050		     struct ieee80211_link_sta *link_sta, int *ret)
2051{
2052	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2053	struct iwl_mvm_vif_link_info *mvm_link =
2054		mvmvif->link[link_sta->link_id];
2055	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2056	struct iwl_mvm_link_sta *mvm_link_sta;
2057	u8 sta_id;
2058
2059	lockdep_assert_held(&mvm->mutex);
2060
2061	mvm_link_sta =
2062		rcu_dereference_protected(mvm_sta->link[link_sta->link_id],
2063					  lockdep_is_held(&mvm->mutex));
2064	sta_id = mvm_link_sta->sta_id;
2065
2066	/* If there is a TXQ still marked as reserved - free it */
2067	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
2068		u8 reserved_txq = mvm_sta->reserved_queue;
2069		enum iwl_mvm_queue_status *status;
2070
2071		/*
2072		 * If no traffic has gone through the reserved TXQ - it
2073		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
2074		 * should be manually marked as free again
2075		 */
2076		status = &mvm->queue_info[reserved_txq].status;
2077		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
2078			 (*status != IWL_MVM_QUEUE_FREE),
2079			 "sta_id %d reserved txq %d status %d",
2080			 sta_id, reserved_txq, *status)) {
2081			*ret = -EINVAL;
2082			return true;
2083		}
2084
2085		*status = IWL_MVM_QUEUE_FREE;
2086	}
2087
2088	if (vif->type == NL80211_IFTYPE_STATION &&
2089	    mvm_link->ap_sta_id == sta_id) {
2090		/* if associated - we can't remove the AP STA now */
2091		if (vif->cfg.assoc)
2092			return true;
2093
2094		/* first remove remaining keys */
2095		iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0);
2096
2097		/* unassoc - go ahead - remove the AP STA now */
2098		mvm_link->ap_sta_id = IWL_INVALID_STA;
2099	}
2100
2101	/*
2102	 * This shouldn't happen - the TDLS channel switch should be canceled
2103	 * before the STA is removed.
2104	 */
2105	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
2106		mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
2107		cancel_delayed_work(&mvm->tdls_cs.dwork);
2108	}
2109
2110	return false;
2111}
2112
2113int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
2114		   struct ieee80211_vif *vif,
2115		   struct ieee80211_sta *sta)
2116{
2117	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2118	int ret;
2119
2120	lockdep_assert_held(&mvm->mutex);
2121
2122	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
2123	if (ret)
2124		return ret;
2125
2126	/* flush its queues here since we are freeing mvm_sta */
2127	ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
2128				mvm_sta->tfd_queue_msk);
2129	if (ret)
2130		return ret;
2131	if (iwl_mvm_has_new_tx_api(mvm)) {
2132		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
2133	} else {
2134		u32 q_mask = mvm_sta->tfd_queue_msk;
2135
2136		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2137						     q_mask);
2138	}
2139	if (ret)
2140		return ret;
2141
2142	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
2143
2144	iwl_mvm_disable_sta_queues(mvm, vif, sta);
2145
2146	if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret))
2147		return ret;
2148
2149	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);
2150	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);
2151
2152	return ret;
2153}
2154
2155int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
2156		      struct ieee80211_vif *vif,
2157		      u8 sta_id)
2158{
2159	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
2160
2161	lockdep_assert_held(&mvm->mutex);
2162
2163	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
2164	return ret;
2165}
2166
2167int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
2168			     struct iwl_mvm_int_sta *sta,
2169			     u32 qmask, enum nl80211_iftype iftype,
2170			     u8 type)
2171{
2172	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
2173	    sta->sta_id == IWL_INVALID_STA) {
2174		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
2175		if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))
2176			return -ENOSPC;
2177	}
2178
2179	sta->tfd_queue_msk = qmask;
2180	sta->type = type;
2181
2182	/* put a non-NULL value so iterating over the stations won't stop */
2183	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2184	return 0;
2185}
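
/*
 * The ERR_PTR(-EINVAL) above makes the slot look occupied to code that
 * scans fw_id_to_mac_id for a free (NULL) entry, while IS_ERR() checks
 * still let readers tell internal stations apart from real mac80211
 * stations (see the "error values" note in iwl_mvm_rm_sta_common()).
 */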
2186
2187void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
2188{
2189	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
2190	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
2191	sta->sta_id = IWL_INVALID_STA;
2192}
2193
2194static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
2195					  u8 sta_id, u8 fifo)
2196{
2197	unsigned int wdg_timeout =
2198		mvm->trans->trans_cfg->base_params->wd_timeout;
2199	struct iwl_trans_txq_scd_cfg cfg = {
2200		.fifo = fifo,
2201		.sta_id = sta_id,
2202		.tid = IWL_MAX_TID_COUNT,
2203		.aggregate = false,
2204		.frame_limit = IWL_FRAME_LIMIT,
2205	};
2206
2207	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2208
2209	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2210}
2211
2212static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
2213{
2214	unsigned int wdg_timeout =
2215		mvm->trans->trans_cfg->base_params->wd_timeout;
2216
2217	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2218
2219	return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT,
2220				       wdg_timeout);
2221}
2222
2223static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2224					  int maccolor, u8 *addr,
2225					  struct iwl_mvm_int_sta *sta,
2226					  u16 *queue, int fifo)
2227{
2228	int ret;
2229
2230	/* Map queue to fifo - needs to happen before adding station */
2231	if (!iwl_mvm_has_new_tx_api(mvm))
2232		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2233
2234	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2235	if (ret) {
2236		if (!iwl_mvm_has_new_tx_api(mvm))
2237			iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue,
2238					    IWL_MAX_TID_COUNT);
2239		return ret;
2240	}
2241
2242	/*
2243	 * For 22000 firmware and on, we cannot add a queue to a station unknown
2244	 * to the firmware, so enable the queue here - after the station was added
2245	 */
2246	if (iwl_mvm_has_new_tx_api(mvm)) {
2247		int txq;
2248
2249		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2250		if (txq < 0) {
2251			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2252			return txq;
2253		}
2254
2255		*queue = txq;
2256	}
2257
2258	return 0;
2259}
2260
2261int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
2262{
2263	int ret;
2264	u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 :
2265		BIT(mvm->aux_queue);
2266
2267	lockdep_assert_held(&mvm->mutex);
2268
2269	/* Allocate aux station and assign to it the aux queue */
2270	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask,
2271				       NL80211_IFTYPE_UNSPECIFIED,
2272				       IWL_STA_AUX_ACTIVITY);
2273	if (ret)
2274		return ret;
2275
2276	/*
2277	 * In CDB NICs we need to specify which lmac to use for aux activity;
2278	 * the mac_id argument slot is used to pass lmac_id to the function
2279	 */
2280	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2281					     &mvm->aux_sta, &mvm->aux_queue,
2282					     IWL_MVM_TX_FIFO_MCAST);
2283	if (ret) {
2284		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2285		return ret;
2286	}
2287
2288	return 0;
2289}
2290
2291int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2292{
2293	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2294
2295	lockdep_assert_held(&mvm->mutex);
2296
2297	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2298					      NULL, &mvm->snif_sta,
2299					      &mvm->snif_queue,
2300					      IWL_MVM_TX_FIFO_BE);
2301}
2302
2303int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2304{
2305	int ret;
2306
2307	lockdep_assert_held(&mvm->mutex);
2308
2309	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_INVALID_STA))
2310		return -EINVAL;
2311
2312	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
2313			    &mvm->snif_queue, IWL_MAX_TID_COUNT);
2314	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2315	if (ret)
2316		IWL_WARN(mvm, "Failed sending remove station\n");
2317
2318	return ret;
2319}
2320
2321int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2322{
2323	int ret;
2324
2325	lockdep_assert_held(&mvm->mutex);
2326
2327	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_INVALID_STA))
2328		return -EINVAL;
2329
2330	iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
2331			    &mvm->aux_queue, IWL_MAX_TID_COUNT);
2332	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2333	if (ret)
2334		IWL_WARN(mvm, "Failed sending remove station\n");
2335	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2336
2337	return ret;
2338}
2339
2340void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2341{
2342	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2343}
2344
2345/*
2346 * Send the add station command for the vif's broadcast station.
2347 * Assumes that the station was already allocated.
2348 *
2349 * @mvm: the mvm component
2350 * @vif: the interface to which the broadcast station is added
2352 */
2353int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2354{
2355	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2356	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
2357	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2358	const u8 *baddr = _baddr;
2359	int queue;
2360	int ret;
2361	unsigned int wdg_timeout =
2362		iwl_mvm_get_wd_timeout(mvm, vif);
2363	struct iwl_trans_txq_scd_cfg cfg = {
2364		.fifo = IWL_MVM_TX_FIFO_VO,
2365		.sta_id = mvmvif->deflink.bcast_sta.sta_id,
2366		.tid = IWL_MAX_TID_COUNT,
2367		.aggregate = false,
2368		.frame_limit = IWL_FRAME_LIMIT,
2369	};
2370
2371	lockdep_assert_held(&mvm->mutex);
2372
2373	if (!iwl_mvm_has_new_tx_api(mvm)) {
2374		if (vif->type == NL80211_IFTYPE_AP ||
2375		    vif->type == NL80211_IFTYPE_ADHOC) {
2376			queue = mvm->probe_queue;
2377		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2378			queue = mvm->p2p_dev_queue;
2379		} else {
2380			WARN(1, "Missing required TXQ for adding bcast STA\n");
2381			return -EINVAL;
2382		}
2383
2384		bsta->tfd_queue_msk |= BIT(queue);
2385
2386		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2387	}
2388
2389	if (vif->type == NL80211_IFTYPE_ADHOC)
2390		baddr = vif->bss_conf.bssid;
2391
2392	if (WARN_ON_ONCE(bsta->sta_id == IWL_INVALID_STA))
2393		return -ENOSPC;
2394
2395	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2396					 mvmvif->id, mvmvif->color);
2397	if (ret)
2398		return ret;
2399
2400	/*
2401	 * For 22000 firmware and on, we cannot add a queue to a station unknown
2402	 * to the firmware, so enable the queue here - after the station was added
2403	 */
2404	if (iwl_mvm_has_new_tx_api(mvm)) {
2405		queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id,
2406						IWL_MAX_TID_COUNT,
2407						wdg_timeout);
2408		if (queue < 0) {
2409			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2410			return queue;
2411		}
2412
2413		if (vif->type == NL80211_IFTYPE_AP ||
2414		    vif->type == NL80211_IFTYPE_ADHOC) {
2415			/* for queue management */
2416			mvm->probe_queue = queue;
2417			/* for use in TX */
2418			mvmvif->deflink.mgmt_queue = queue;
2419		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2420			mvm->p2p_dev_queue = queue;
2421		}
2422	} else if (vif->type == NL80211_IFTYPE_AP ||
2423		   vif->type == NL80211_IFTYPE_ADHOC) {
2424		/* set it for use in TX */
2425		mvmvif->deflink.mgmt_queue = mvm->probe_queue;
2426	}
2427
2428	return 0;
2429}
2430
2431void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2432				   struct ieee80211_vif *vif)
2433{
2434	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2435	u16 *queueptr, queue;
2436
2437	lockdep_assert_held(&mvm->mutex);
2438
2439	iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
2440			  mvmvif->deflink.bcast_sta.tfd_queue_msk);
2441
2442	switch (vif->type) {
2443	case NL80211_IFTYPE_AP:
2444	case NL80211_IFTYPE_ADHOC:
2445		queueptr = &mvm->probe_queue;
2446		break;
2447	case NL80211_IFTYPE_P2P_DEVICE:
2448		queueptr = &mvm->p2p_dev_queue;
2449		break;
2450	default:
2451		WARN(1, "Can't free bcast queue on vif type %d\n",
2452		     vif->type);
2453		return;
2454	}
2455
2456	queue = *queueptr;
2457	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id,
2458			    queueptr, IWL_MAX_TID_COUNT);
2459
2460	if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)
2461		mvmvif->deflink.mgmt_queue = mvm->probe_queue;
2462
2463	if (iwl_mvm_has_new_tx_api(mvm))
2464		return;
2465
2466	WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue)));
2467	mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue);
2468}
2469
2470	/* Send the FW a request to remove the station from its internal data
2471 * structures, but DO NOT remove the entry from the local data structures. */
2472int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2473{
2474	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2475	int ret;
2476
2477	lockdep_assert_held(&mvm->mutex);
2478
2479	iwl_mvm_free_bcast_sta_queues(mvm, vif);
2480
2481	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id);
2482	if (ret)
2483		IWL_WARN(mvm, "Failed sending remove station\n");
2484	return ret;
2485}
2486
2487int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2488{
2489	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2490
2491	lockdep_assert_held(&mvm->mutex);
2492
2493	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0,
2494					ieee80211_vif_type_p2p(vif),
2495					IWL_STA_GENERAL_PURPOSE);
2496}
2497
2498	/* Allocate a new station entry for the broadcast station of the given vif,
2499	 * and send it to the FW.
2500	 * Note that each P2P mac should have its own broadcast station.
2501	 *
2502	 * @mvm: the mvm component
2503	 * @vif: the interface to which the broadcast station is added
2504	 */
2505int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2506{
2507	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2508	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
2509	int ret;
2510
2511	lockdep_assert_held(&mvm->mutex);
2512
2513	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2514	if (ret)
2515		return ret;
2516
2517	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2518
2519	if (ret)
2520		iwl_mvm_dealloc_int_sta(mvm, bsta);
2521
2522	return ret;
2523}
2524
2525void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2526{
2527	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2528
2529	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta);
2530}
2531
2532/*
2533 * Send the FW a request to remove the station from its internal data
2534 * structures, and in addition remove it from the local data structure.
2535 */
2536int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2537{
2538	int ret;
2539
2540	lockdep_assert_held(&mvm->mutex);
2541
2542	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2543
2544	iwl_mvm_dealloc_bcast_sta(mvm, vif);
2545
2546	return ret;
2547}
2548
2549/*
2550 * Allocate a new station entry for the multicast station of the given vif,
2551 * and send it to the FW.
2552 * Note that each AP/GO mac should have its own multicast station.
2553 *
2554 * @mvm: the mvm component
2555 * @vif: the interface to which the multicast station is added
2556 */
2557int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2558{
2559	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2560	struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta;
2561	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2562	const u8 *maddr = _maddr;
2563	struct iwl_trans_txq_scd_cfg cfg = {
2564		.fifo = vif->type == NL80211_IFTYPE_AP ?
2565			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2566		.sta_id = msta->sta_id,
2567		.tid = 0,
2568		.aggregate = false,
2569		.frame_limit = IWL_FRAME_LIMIT,
2570	};
2571	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif);
2572	int ret;
2573
2574	lockdep_assert_held(&mvm->mutex);
2575
2576	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2577		    vif->type != NL80211_IFTYPE_ADHOC))
2578		return -EOPNOTSUPP;
2579
2580	/*
2581	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2582	 * invalid, so make sure we use the queue we want.
2583	 * Note that this is done here as we want to avoid making DQA
2584	 * changes in the mac80211 layer.
2585	 */
2586	if (vif->type == NL80211_IFTYPE_ADHOC)
2587		mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2588
2589	/*
2590	 * While in previous FWs we had to exclude the cab queue from the TFD
2591	 * queue mask, now it is needed like any other queue.
2592	 */
2593	if (!iwl_mvm_has_new_tx_api(mvm) &&
2594	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2595		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
2596				   &cfg,
2597				   timeout);
2598		msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue);
2599	}
2600	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2601					 mvmvif->id, mvmvif->color);
2602	if (ret)
2603		goto err;
2604
2605	/*
2606	 * Enable cab queue after the ADD_STA command is sent.
2607	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2608	 * command with unknown station id, and for FW that doesn't support
2609	 * station API since the cab queue is not included in the
2610	 * tfd_queue_mask.
2611	 */
2612	if (iwl_mvm_has_new_tx_api(mvm)) {
2613		int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id,
2614						    0, timeout);
2615		if (queue < 0) {
2616			ret = queue;
2617			goto err;
2618		}
2619		mvmvif->deflink.cab_queue = queue;
2620	} else if (!fw_has_api(&mvm->fw->ucode_capa,
2621			       IWL_UCODE_TLV_API_STA_TYPE))
2622		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
2623				   &cfg,
2624				   timeout);
2625
2626	return 0;
2627err:
2628	iwl_mvm_dealloc_int_sta(mvm, msta);
2629	return ret;
2630}
2631
2632static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2633				    struct ieee80211_key_conf *keyconf,
2634				    bool mcast)
2635{
2636	union {
2637		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2638		struct iwl_mvm_add_sta_key_cmd cmd;
2639	} u = {};
2640	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2641				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2642	__le16 key_flags;
2643	int ret, size;
2644	u32 status;
2645
2646	/* This is a valid situation for GTK removal */
2647	if (sta_id == IWL_INVALID_STA)
2648		return 0;
2649
2650	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2651				 STA_KEY_FLG_KEYID_MSK);
2652	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2653	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2654
2655	if (mcast)
2656		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2657
2658	/*
2659	 * The fields assigned here are in the same location at the start
2660	 * of the command, so we can do this union trick.
2661	 */
2662	u.cmd.common.key_flags = key_flags;
2663	u.cmd.common.key_offset = keyconf->hw_key_idx;
2664	u.cmd.common.sta_id = sta_id;
2665
2666	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2667
2668	status = ADD_STA_SUCCESS;
2669	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2670					  &status);
2671
2672	switch (status) {
2673	case ADD_STA_SUCCESS:
2674		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2675		break;
2676	default:
2677		ret = -EIO;
2678		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2679		break;
2680	}
2681
2682	return ret;
2683}
2684
2685/*
2686 * Send the FW a request to remove the station from its internal data
2687 * structures, and in addition remove it from the local data structure.
2688 */
2689int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2690{
2691	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2692	int ret;
2693
2694	lockdep_assert_held(&mvm->mutex);
2695
2696	iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
2697			  mvmvif->deflink.mcast_sta.tfd_queue_msk);
2698
2699	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
2700			    &mvmvif->deflink.cab_queue, 0);
2701
2702	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id);
2703	if (ret)
2704		IWL_WARN(mvm, "Failed sending remove station\n");
2705
2706	return ret;
2707}
2708
2709static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2710{
2711	struct iwl_mvm_delba_data notif = {
2712		.baid = baid,
2713	};
2714
2715	iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
2716					&notif, sizeof(notif));
2717};
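
/*
 * Note: the 'true' argument above makes the notification synchronous -
 * we return only once all RX queues have processed the DEL_BA, so the
 * reorder buffers can then be torn down safely.
 */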
2718
2719static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2720				 struct iwl_mvm_baid_data *data)
2721{
2722	int i;
2723
2724	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2725
2726	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2727		int j;
2728		struct iwl_mvm_reorder_buffer *reorder_buf =
2729			&data->reorder_buf[i];
2730		struct iwl_mvm_reorder_buf_entry *entries =
2731			&data->entries[i * data->entries_per_queue];
2732
2733		spin_lock_bh(&reorder_buf->lock);
2734		if (likely(!reorder_buf->num_stored)) {
2735			spin_unlock_bh(&reorder_buf->lock);
2736			continue;
2737		}
2738
2739		/*
2740		 * This shouldn't happen in regular DELBA since the internal
2741		 * delBA notification should trigger a release of all frames in
2742		 * the reorder buffer.
2743		 */
2744		WARN_ON(1);
2745
2746		for (j = 0; j < data->buf_size; j++)
2747			__skb_queue_purge(&entries[j].frames);
2748
2749		spin_unlock_bh(&reorder_buf->lock);
2750	}
2751}
2752
2753static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2754					struct iwl_mvm_baid_data *data,
2755					u16 ssn)
2756{
2757	int i;
2758
2759	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2760		struct iwl_mvm_reorder_buffer *reorder_buf =
2761			&data->reorder_buf[i];
2762		struct iwl_mvm_reorder_buf_entry *entries =
2763			&data->entries[i * data->entries_per_queue];
2764		int j;
2765
2766		reorder_buf->num_stored = 0;
2767		reorder_buf->head_sn = ssn;
2768		spin_lock_init(&reorder_buf->lock);
2769		reorder_buf->queue = i;
2770		reorder_buf->valid = false;
2771		for (j = 0; j < data->buf_size; j++)
2772			__skb_queue_head_init(&entries[j].frames);
2773	}
2774}
2775
2776static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
2777				  struct ieee80211_sta *sta,
2778				  bool start, int tid, u16 ssn,
2779				  u16 buf_size)
2780{
2781	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2782	struct iwl_mvm_add_sta_cmd cmd = {
2783		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
2784		.sta_id = mvm_sta->deflink.sta_id,
2785		.add_modify = STA_MODE_MODIFY,
2786	};
2787	u32 status;
2788	int ret;
2789
2790	if (start) {
2791		cmd.add_immediate_ba_tid = tid;
2792		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2793		cmd.rx_ba_window = cpu_to_le16(buf_size);
2794		cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
2795	} else {
2796		cmd.remove_immediate_ba_tid = tid;
2797		cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
2798	}
2799
2800	status = ADD_STA_SUCCESS;
2801	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2802					  iwl_mvm_add_sta_cmd_size(mvm),
2803					  &cmd, &status);
2804	if (ret)
2805		return ret;
2806
2807	switch (status & IWL_ADD_STA_STATUS_MASK) {
2808	case ADD_STA_SUCCESS:
2809		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2810			     start ? "start" : "stopp");
2811		if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
2812			    !(status & IWL_ADD_STA_BAID_VALID_MASK)))
2813			return -EINVAL;
2814		return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
2815	case ADD_STA_IMMEDIATE_BA_FAILURE:
2816		IWL_WARN(mvm, "RX BA Session refused by fw\n");
2817		return -ENOSPC;
2818	default:
2819		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2820			start ? "start" : "stopp", status);
2821		return -EIO;
2822	}
2823}
2824
2825static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
2826				  struct ieee80211_sta *sta,
2827				  bool start, int tid, u16 ssn,
2828				  u16 buf_size, int baid)
2829{
2830	struct iwl_rx_baid_cfg_cmd cmd = {
2831		.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
2832				  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
2833	};
2834	struct iwl_host_cmd hcmd = {
2835		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
2836		.flags = CMD_SEND_IN_RFKILL,
2837		.len[0] = sizeof(cmd),
2838		.data[0] = &cmd,
2839	};
2840	int ret;
2841
2842	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
2843
2844	if (start) {
2845		cmd.alloc.sta_id_mask =
2846			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
2847		cmd.alloc.tid = tid;
2848		cmd.alloc.ssn = cpu_to_le16(ssn);
2849		cmd.alloc.win_size = cpu_to_le16(buf_size);
2850		baid = -EIO;
2851	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
2852		cmd.remove_v1.baid = cpu_to_le32(baid);
2853		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
2854	} else {
2855		cmd.remove.sta_id_mask =
2856			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
2857		cmd.remove.tid = cpu_to_le32(tid);
2858	}
2859
2860	ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
2861	if (ret)
2862		return ret;
2863
2864	if (!start) {
2865		/* ignore firmware baid on remove */
2866		baid = 0;
2867	}
2868
2869	IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2870		     start ? "start" : "stopp");
2871
2872	if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
2873		return -EINVAL;
2874
2875	return baid;
2876}
2877
2878static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2879			      bool start, int tid, u16 ssn, u16 buf_size,
2880			      int baid)
2881{
2882	if (fw_has_capa(&mvm->fw->ucode_capa,
2883			IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
2884		return iwl_mvm_fw_baid_op_cmd(mvm, sta, start,
2885					      tid, ssn, buf_size, baid);
2886
2887	return iwl_mvm_fw_baid_op_sta(mvm, sta, start,
2888				      tid, ssn, buf_size);
2889}
2890
2891int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2892		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2893{
2894	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2895	struct iwl_mvm_baid_data *baid_data = NULL;
2896	int ret, baid;
2897	u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
2898							       IWL_MAX_BAID_OLD;
2899
2900	lockdep_assert_held(&mvm->mutex);
2901
2902	if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
2903		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2904		return -ENOSPC;
2905	}
2906
2907	if (iwl_mvm_has_new_rx_api(mvm) && start) {
2908		u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2909
2910		/* sparse doesn't like the __align() so don't check */
2911#ifndef __CHECKER__
2912		/*
2913		 * The division below will be OK if either the cache line size
2914		 * can be divided by the entry size (ALIGN will round up) or
2915		 * if the entry size can be divided by the cache line size, in
2916		 * which case the ALIGN() will do nothing.
2917		 */
2918		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2919			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2920#endif
2921
2922		/*
2923		 * Upward align the reorder buffer size to fill an entire cache
2924		 * line for each queue, to avoid sharing cache lines between
2925		 * different queues.
2926		 */
2927		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
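		/*
		 * Illustrative numbers only: with SMP_CACHE_BYTES == 64 and
		 * 16-byte entries, buf_size == 64 gives 1024 bytes (already
		 * aligned), while buf_size == 10 gives 160 bytes, rounded up
		 * to 192 here.
		 */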
2928
2929		/*
2930		 * Allocate here so if allocation fails we can bail out early
2931		 * before starting the BA session in the firmware
2932		 */
2933		baid_data = kzalloc(sizeof(*baid_data) +
2934				    mvm->trans->num_rx_queues *
2935				    reorder_buf_size,
2936				    GFP_KERNEL);
2937		if (!baid_data)
2938			return -ENOMEM;
2939
2940		/*
2941		 * This division is why we need the above BUILD_BUG_ON(),
2942		 * if that doesn't hold then this will not be right.
2943		 */
2944		baid_data->entries_per_queue =
2945			reorder_buf_size / sizeof(baid_data->entries[0]);
2946	}
2947
2948	if (iwl_mvm_has_new_rx_api(mvm) && !start) {
2949		baid = mvm_sta->tid_to_baid[tid];
2950	} else {
2951		/* we don't really need it in this case */
2952		baid = -1;
2953	}
2954
2955	/* Don't send command to remove (start=0) BAID during restart */
2956	if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2957		baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size,
2958					  baid);
2959
2960	if (baid < 0) {
2961		ret = baid;
2962		goto out_free;
2963	}
2964
2965	if (start) {
2966		mvm->rx_ba_sessions++;
2967
2968		if (!iwl_mvm_has_new_rx_api(mvm))
2969			return 0;
2970
2971		baid_data->baid = baid;
2972		baid_data->timeout = timeout;
2973		baid_data->last_rx = jiffies;
2974		baid_data->rcu_ptr = &mvm->baid_map[baid];
2975		timer_setup(&baid_data->session_timer,
2976			    iwl_mvm_rx_agg_session_expired, 0);
2977		baid_data->mvm = mvm;
2978		baid_data->tid = tid;
2979		baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
2980		baid_data->buf_size = buf_size;
2981
2982		mvm_sta->tid_to_baid[tid] = baid;
2983		if (timeout)
2984			mod_timer(&baid_data->session_timer,
2985				  TU_TO_EXP_TIME(timeout * 2));
2986
2987		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
2988		/*
2989		 * protect the BA data with RCU to cover a case where our
2990		 * internal RX sync mechanism times out (not that it's
2991		 * supposed to happen) and we would free the session data while
2992		 * RX is being processed in parallel
2993		 */
2994		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2995			     mvm_sta->deflink.sta_id, tid, baid);
2996		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2997		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2998	} else  {
2999		baid = mvm_sta->tid_to_baid[tid];
3000
3001		if (mvm->rx_ba_sessions > 0)
3002			/* check that restart flow didn't zero the counter */
3003			mvm->rx_ba_sessions--;
3004		if (!iwl_mvm_has_new_rx_api(mvm))
3005			return 0;
3006
3007		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
3008			return -EINVAL;
3009
3010		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
3011		if (WARN_ON(!baid_data))
3012			return -EINVAL;
3013
3014		/* synchronize all rx queues so we can safely delete */
3015		iwl_mvm_free_reorder(mvm, baid_data);
3016		timer_shutdown_sync(&baid_data->session_timer);
3017		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
3018		kfree_rcu(baid_data, rcu_head);
3019		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
3020	}
3021	return 0;
3022
3023out_free:
3024	kfree(baid_data);
3025	return ret;
3026}
3027
3028int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3029		       int tid, u8 queue, bool start)
3030{
3031	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3032	struct iwl_mvm_add_sta_cmd cmd = {};
3033	int ret;
3034	u32 status;
3035
3036	lockdep_assert_held(&mvm->mutex);
3037
3038	if (start) {
3039		mvm_sta->tfd_queue_msk |= BIT(queue);
3040		mvm_sta->tid_disable_agg &= ~BIT(tid);
3041	} else {
3042		/* In DQA-mode the queue isn't removed on agg termination */
3043		mvm_sta->tid_disable_agg |= BIT(tid);
3044	}
3045
3046	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
3047	cmd.sta_id = mvm_sta->deflink.sta_id;
3048	cmd.add_modify = STA_MODE_MODIFY;
3049	if (!iwl_mvm_has_new_tx_api(mvm))
3050		cmd.modify_mask = STA_MODIFY_QUEUES;
3051	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
3052	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
3053	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
3054
3055	status = ADD_STA_SUCCESS;
3056	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
3057					  iwl_mvm_add_sta_cmd_size(mvm),
3058					  &cmd, &status);
3059	if (ret)
3060		return ret;
3061
3062	switch (status & IWL_ADD_STA_STATUS_MASK) {
3063	case ADD_STA_SUCCESS:
3064		break;
3065	default:
3066		ret = -EIO;
3067		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
3068			start ? "start" : "stopp", status);
3069		break;
3070	}
3071
3072	return ret;
3073}
3074
3075const u8 tid_to_mac80211_ac[] = {
3076	IEEE80211_AC_BE,
3077	IEEE80211_AC_BK,
3078	IEEE80211_AC_BK,
3079	IEEE80211_AC_BE,
3080	IEEE80211_AC_VI,
3081	IEEE80211_AC_VI,
3082	IEEE80211_AC_VO,
3083	IEEE80211_AC_VO,
3084	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
3085};
3086
3087static const u8 tid_to_ucode_ac[] = {
3088	AC_BE,
3089	AC_BK,
3090	AC_BK,
3091	AC_BE,
3092	AC_VI,
3093	AC_VI,
3094	AC_VO,
3095	AC_VO,
3096};
3097
3098int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3099			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3100{
3101	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3102	struct iwl_mvm_tid_data *tid_data;
3103	u16 normalized_ssn;
3104	u16 txq_id;
3105	int ret;
3106
3107	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
3108		return -EINVAL;
3109
3110	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
3111	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
3112		IWL_ERR(mvm,
3113			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
3114			mvmsta->tid_data[tid].state);
3115		return -ENXIO;
3116	}
3117
3118	lockdep_assert_held(&mvm->mutex);
3119
3120	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
3121	    iwl_mvm_has_new_tx_api(mvm)) {
3122		u8 ac = tid_to_mac80211_ac[tid];
3123
3124		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
3125		if (ret)
3126			return ret;
3127	}
3128
3129	spin_lock_bh(&mvmsta->lock);
3130
3131	/*
3132	 * Note the possible cases:
3133	 *  1. An enabled TXQ - TXQ needs to become agg'ed
3134	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
3135	 *	it as reserved
3136	 */
3137	txq_id = mvmsta->tid_data[tid].txq_id;
3138	if (txq_id == IWL_MVM_INVALID_QUEUE) {
3139		ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
3140					      IWL_MVM_DQA_MIN_DATA_QUEUE,
3141					      IWL_MVM_DQA_MAX_DATA_QUEUE);
3142		if (ret < 0) {
3143			IWL_ERR(mvm, "Failed to allocate agg queue\n");
3144			goto out;
3145		}
3146
3147		txq_id = ret;
3148
3149		/* TXQ hasn't yet been enabled, so mark it only as reserved */
3150		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
3151	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
3152		ret = -ENXIO;
3153		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
3154			tid, IWL_MAX_HW_QUEUES - 1);
3155		goto out;
3156
3157	} else if (unlikely(mvm->queue_info[txq_id].status ==
3158			    IWL_MVM_QUEUE_SHARED)) {
3159		ret = -ENXIO;
3160		IWL_DEBUG_TX_QUEUES(mvm,
3161				    "Can't start tid %d agg on shared queue!\n",
3162				    tid);
3163		goto out;
3164	}
3165
3166	IWL_DEBUG_TX_QUEUES(mvm,
3167			    "AGG for tid %d will be on queue #%d\n",
3168			    tid, txq_id);
3169
3170	tid_data = &mvmsta->tid_data[tid];
3171	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3172	tid_data->txq_id = txq_id;
3173	*ssn = tid_data->ssn;
3174
3175	IWL_DEBUG_TX_QUEUES(mvm,
3176			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
3177			    mvmsta->deflink.sta_id, tid, txq_id,
3178			    tid_data->ssn,
3179			    tid_data->next_reclaimed);
3180
3181	/*
3182	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
3183	 * to align the wrap-around of the ssn so we compare relevant values.
3184	 */
3185	normalized_ssn = tid_data->ssn;
3186	if (mvm->trans->trans_cfg->gen2)
3187		normalized_ssn &= 0xff;
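	/*
	 * Illustrative example: on gen2, an ssn of 0x1ff is compared as
	 * 0xff below, so it still matches an 8-bit next_reclaimed of 0xff
	 * despite the wrap-around.
	 */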
3188
3189	if (normalized_ssn == tid_data->next_reclaimed) {
3190		tid_data->state = IWL_AGG_STARTING;
3191		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
3192	} else {
3193		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3194		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
3195	}
3196
3197out:
3198	spin_unlock_bh(&mvmsta->lock);
3199
3200	return ret;
3201}
3202
3203int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3204			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
3205			    bool amsdu)
3206{
3207	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3208	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3209	unsigned int wdg_timeout =
3210		iwl_mvm_get_wd_timeout(mvm, vif);
3211	int queue, ret;
3212	bool alloc_queue = true;
3213	enum iwl_mvm_queue_status queue_status;
3214	u16 ssn;
3215
3216	struct iwl_trans_txq_scd_cfg cfg = {
3217		.sta_id = mvmsta->deflink.sta_id,
3218		.tid = tid,
3219		.frame_limit = buf_size,
3220		.aggregate = true,
3221	};
3222
3223	/*
3224	 * When FW supports TLC_OFFLOAD, it also implements the Tx aggregation
3225	 * manager, so this function should never be called in this case.
3226	 */
3227	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
3228		return -EINVAL;
3229
3230	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3231		     != IWL_MAX_TID_COUNT);
3232
3233	spin_lock_bh(&mvmsta->lock);
3234	ssn = tid_data->ssn;
3235	queue = tid_data->txq_id;
3236	tid_data->state = IWL_AGG_ON;
3237	mvmsta->agg_tids |= BIT(tid);
3238	tid_data->ssn = 0xffff;
3239	tid_data->amsdu_in_ampdu_allowed = amsdu;
3240	spin_unlock_bh(&mvmsta->lock);
3241
3242	if (iwl_mvm_has_new_tx_api(mvm)) {
3243		/*
3244		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3245		 * would have failed, so if we are here there is no need to
3246		 * allocate a queue.
3247		 * However, if the aggregation size is different from the
3248		 * default size, the scheduler should be reconfigured.
3249		 * We cannot do this with the new TX API, so return unsupported
3250		 * for now, until it is offloaded to firmware.
3251		 * Note that if the SCD default value changes, this condition
3252		 * should be updated as well.
3253		 */
3254		if (buf_size < IWL_FRAME_LIMIT)
3255			return -EOPNOTSUPP;
3256
3257		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3258		if (ret)
3259			return -EIO;
3260		goto out;
3261	}
3262
3263	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3264
3265	queue_status = mvm->queue_info[queue].status;
3266
3267	/* Maybe there is no need to even alloc a queue... */
3268	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
3269		alloc_queue = false;
3270
3271	/*
3272	 * Only reconfig the SCD for the queue if the window size has
3273	 * changed from current (become smaller)
3274	 */
3275	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
3276		/*
3277		 * If reconfiguring an existing queue, it first must be
3278		 * drained
3279		 */
3280		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3281						     BIT(queue));
3282		if (ret) {
3283			IWL_ERR(mvm,
3284				"Error draining queue before reconfig\n");
3285			return ret;
3286		}
3287
3288		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3289					   mvmsta->deflink.sta_id, tid,
3290					   buf_size, ssn);
3291		if (ret) {
3292			IWL_ERR(mvm,
3293				"Error reconfiguring TXQ #%d\n", queue);
3294			return ret;
3295		}
3296	}
3297
3298	if (alloc_queue)
3299		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
3300				   &cfg, wdg_timeout);
3301
3302	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
3303	if (queue_status != IWL_MVM_QUEUE_SHARED) {
3304		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3305		if (ret)
3306			return -EIO;
3307	}
3308
3309	/* No need to mark as reserved */
3310	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3311
3312out:
3313	/*
3314	 * Even though in theory the peer could have different
3315	 * aggregation reorder buffer sizes for different sessions,
3316	 * our ucode doesn't allow for that and has a global limit
3317	 * for each station. Therefore, use the minimum of all the
3318	 * aggregation sessions and our default value.
3319	 */
3320	mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
3321		min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize,
3322		    buf_size);
3323	mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit =
3324		mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize;
3325
3326	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3327		     sta->addr, tid);
3328
3329	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq);
3330}
3331
3332static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3333					struct iwl_mvm_sta *mvmsta,
3334					struct iwl_mvm_tid_data *tid_data)
3335{
3336	u16 txq_id = tid_data->txq_id;
3337
3338	lockdep_assert_held(&mvm->mutex);
3339
3340	if (iwl_mvm_has_new_tx_api(mvm))
3341		return;
3342
3343	/*
3344	 * The TXQ is marked as reserved only if no traffic came through yet.
3345	 * This means no traffic has been sent on this TID (agg'd or not), so
3346	 * we no longer have use for the queue. It hasn't even been
3347	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
3348	 * free.
3349	 */
3350	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3351		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3352		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3353	}
3354}
3355
3356int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3357			    struct ieee80211_sta *sta, u16 tid)
3358{
3359	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3360	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3361	u16 txq_id;
3362	int err;
3363
3364	/*
3365	 * If mac80211 is cleaning its state, then say that we finished since
3366	 * our state has been cleared anyway.
3367	 */
3368	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3369		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3370		return 0;
3371	}
3372
3373	spin_lock_bh(&mvmsta->lock);
3374
3375	txq_id = tid_data->txq_id;
3376
3377	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3378			    mvmsta->deflink.sta_id, tid, txq_id,
3379			    tid_data->state);
3380
3381	mvmsta->agg_tids &= ~BIT(tid);
3382
3383	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3384
3385	switch (tid_data->state) {
3386	case IWL_AGG_ON:
3387		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3388
3389		IWL_DEBUG_TX_QUEUES(mvm,
3390				    "ssn = %d, next_recl = %d\n",
3391				    tid_data->ssn, tid_data->next_reclaimed);
3392
3393		tid_data->ssn = 0xffff;
3394		tid_data->state = IWL_AGG_OFF;
3395		spin_unlock_bh(&mvmsta->lock);
3396
3397		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3398
3399		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3400		return 0;
3401	case IWL_AGG_STARTING:
3402	case IWL_EMPTYING_HW_QUEUE_ADDBA:
3403		/*
3404		 * The agg session has been stopped before it was set up. This
3405		 * can happen when the AddBA timer times out for example.
3406		 */
3407
3408		/* No barriers since we are under mutex */
3409		lockdep_assert_held(&mvm->mutex);
3410
3411		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3412		tid_data->state = IWL_AGG_OFF;
3413		err = 0;
3414		break;
3415	default:
3416		IWL_ERR(mvm,
3417			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3418			mvmsta->deflink.sta_id, tid, tid_data->state);
3419		IWL_ERR(mvm,
3420			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
3421		err = -EINVAL;
3422	}
3423
3424	spin_unlock_bh(&mvmsta->lock);
3425
3426	return err;
3427}
3428
3429int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3430			    struct ieee80211_sta *sta, u16 tid)
3431{
3432	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3433	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3434	u16 txq_id;
3435	enum iwl_mvm_agg_state old_state;
3436
3437	/*
3438	 * First set the agg state to OFF to avoid calling
3439	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3440	 */
3441	spin_lock_bh(&mvmsta->lock);
3442	txq_id = tid_data->txq_id;
3443	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3444			    mvmsta->deflink.sta_id, tid, txq_id,
3445			    tid_data->state);
3446	old_state = tid_data->state;
3447	tid_data->state = IWL_AGG_OFF;
3448	mvmsta->agg_tids &= ~BIT(tid);
3449	spin_unlock_bh(&mvmsta->lock);
3450
3451	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3452
3453	if (old_state >= IWL_AGG_ON) {
3454		iwl_mvm_drain_sta(mvm, mvmsta, true);
3455
3456		if (iwl_mvm_has_new_tx_api(mvm)) {
3457			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id,
3458						   BIT(tid)))
3459				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3460			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3461		} else {
3462			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3463				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3464			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3465		}
3466
3467		iwl_mvm_drain_sta(mvm, mvmsta, false);
3468
3469		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3470	}
3471
3472	return 0;
3473}
3474
3475static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3476{
3477	int i, max = -1, max_offs = -1;
3478
3479	lockdep_assert_held(&mvm->mutex);
3480
3481	/* Pick the unused key offset with the highest 'deleted'
3482	 * counter. Every time a key is deleted, all the counters
3483	 * are incremented and the one that was just deleted is
3484	 * reset to zero. Thus, the highest counter is the one
3485	 * that was deleted longest ago. Pick that one.
3486	 */
3487	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3488		if (test_bit(i, mvm->fw_key_table))
3489			continue;
3490		if (mvm->fw_key_deleted[i] > max) {
3491			max = mvm->fw_key_deleted[i];
3492			max_offs = i;
3493		}
3494	}
3495
3496	if (max_offs < 0)
3497		return STA_KEY_IDX_INVALID;
3498
3499	return max_offs;
3500}
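
/*
 * Worked example with illustrative values: if offsets 0..2 are unused and
 * fw_key_deleted = {3, 0, 5}, offset 2 is picked - it was deleted longest
 * ago, since every deletion increments all counters and zeroes only the
 * offset that was just freed.
 */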
3501
3502static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3503					       struct ieee80211_vif *vif,
3504					       struct ieee80211_sta *sta)
3505{
3506	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3507
3508	if (sta)
3509		return iwl_mvm_sta_from_mac80211(sta);
3510
3511	/*
3512	 * The device expects GTKs for station interfaces to be
3513	 * installed as GTKs for the AP station. If we have no
3514	 * station ID, then use AP's station ID.
3515	 * station ID, then use the AP's station ID.
3516	if (vif->type == NL80211_IFTYPE_STATION &&
3517	    mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
3518		u8 sta_id = mvmvif->deflink.ap_sta_id;
3519
3520		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3521					    lockdep_is_held(&mvm->mutex));
3522
3523		/*
3524		 * It is possible that the 'sta' parameter is NULL,
3525		 * for example when a GTK is removed - the sta_id will then
3526		 * be the AP ID, and no station was passed by mac80211.
3527		 */
3528		if (IS_ERR_OR_NULL(sta))
3529			return NULL;
3530
3531		return iwl_mvm_sta_from_mac80211(sta);
3532	}
3533
3534	return NULL;
3535}
3536
3537static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
3538{
3539	int i;
3540
3541	for (i = len - 1; i >= 0; i--) {
3542		if (pn1[i] > pn2[i])
3543			return 1;
3544		if (pn1[i] < pn2[i])
3545			return -1;
3546	}
3547
3548	return 0;
3549}
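
/*
 * The PN byte arrays are compared most-significant byte first, i.e. byte
 * len-1 is the most significant one. Illustrative example:
 * pn1 = {0x05, 0x00} (PN 5) vs. pn2 = {0x00, 0x01} (PN 256) returns -1.
 */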
3550
3551static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3552				u32 sta_id,
3553				struct ieee80211_key_conf *key, bool mcast,
3554				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3555				u8 key_offset, bool mfp)
3556{
3557	union {
3558		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3559		struct iwl_mvm_add_sta_key_cmd cmd;
3560	} u = {};
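	/*
	 * Both command versions start with the same common header, so the
	 * command is built through u.cmd.common and only the size sent to
	 * the firmware is chosen according to the API version below.
	 */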
3561	__le16 key_flags;
3562	int ret;
3563	u32 status;
3564	u16 keyidx;
3565	u64 pn = 0;
3566	int i, size;
3567	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3568				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3569	int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
3570					    new_api ? 2 : 1);
3571
3572	if (sta_id == IWL_INVALID_STA)
3573		return -EINVAL;
3574
3575	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3576		 STA_KEY_FLG_KEYID_MSK;
3577	key_flags = cpu_to_le16(keyidx);
3578	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3579
3580	if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
3581		key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);
3582
3583	switch (key->cipher) {
3584	case WLAN_CIPHER_SUITE_TKIP:
3585		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3586		if (api_ver >= 2) {
3587			memcpy((void *)&u.cmd.tx_mic_key,
3588			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3589			       IWL_MIC_KEY_SIZE);
3590
3591			memcpy((void *)&u.cmd.rx_mic_key,
3592			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3593			       IWL_MIC_KEY_SIZE);
3594			pn = atomic64_read(&key->tx_pn);
3595
3596		} else {
3597			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3598			for (i = 0; i < 5; i++)
3599				u.cmd_v1.tkip_rx_ttak[i] =
3600					cpu_to_le16(tkip_p1k[i]);
3601		}
3602		memcpy(u.cmd.common.key, key->key, key->keylen);
3603		break;
3604	case WLAN_CIPHER_SUITE_CCMP:
3605		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3606		memcpy(u.cmd.common.key, key->key, key->keylen);
3607		if (api_ver >= 2)
3608			pn = atomic64_read(&key->tx_pn);
3609		break;
3610	case WLAN_CIPHER_SUITE_WEP104:
3611		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3612		fallthrough;
3613	case WLAN_CIPHER_SUITE_WEP40:
3614		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3615		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3616		break;
3617	case WLAN_CIPHER_SUITE_GCMP_256:
3618		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3619		fallthrough;
3620	case WLAN_CIPHER_SUITE_GCMP:
3621		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3622		memcpy(u.cmd.common.key, key->key, key->keylen);
3623		if (api_ver >= 2)
3624			pn = atomic64_read(&key->tx_pn);
3625		break;
3626	default:
3627		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3628		memcpy(u.cmd.common.key, key->key, key->keylen);
3629	}
3630
3631	if (mcast)
3632		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3633	if (mfp)
3634		key_flags |= cpu_to_le16(STA_KEY_MFP);
3635
3636	u.cmd.common.key_offset = key_offset;
3637	u.cmd.common.key_flags = key_flags;
3638	u.cmd.common.sta_id = sta_id;
3639
3640	if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3641		i = 0;
3642	else
3643		i = -1;
3644
3645	for (; i < IEEE80211_NUM_TIDS; i++) {
3646		struct ieee80211_key_seq seq = {};
3647		u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
3648		int rx_pn_len = 8;
3649		/* there's a hole at 2/3 in FW format depending on version */
3650		int hole = api_ver >= 3 ? 0 : 2;
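		/* i.e. with the hole, the PN bytes end up at offsets
		 * 0, 1, 4, 5, 6 and 7, and bytes 2/3 stay zeroed
		 */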
3651
3652		ieee80211_get_key_rx_seq(key, i, &seq);
3653
3654		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
3655			rx_pn[0] = seq.tkip.iv16;
3656			rx_pn[1] = seq.tkip.iv16 >> 8;
3657			rx_pn[2 + hole] = seq.tkip.iv32;
3658			rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
3659			rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
3660			rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
3661		} else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
3662			rx_pn = seq.hw.seq;
3663			rx_pn_len = seq.hw.seq_len;
3664		} else {
3665			rx_pn[0] = seq.ccmp.pn[0];
3666			rx_pn[1] = seq.ccmp.pn[1];
3667			rx_pn[2 + hole] = seq.ccmp.pn[2];
3668			rx_pn[3 + hole] = seq.ccmp.pn[3];
3669			rx_pn[4 + hole] = seq.ccmp.pn[4];
3670			rx_pn[5 + hole] = seq.ccmp.pn[5];
3671		}
3672
3673		if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
3674				   rx_pn_len) > 0)
3675			memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
3676			       rx_pn_len);
3677	}
3678
3679	if (api_ver >= 2) {
3680		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3681		size = sizeof(u.cmd);
3682	} else {
3683		size = sizeof(u.cmd_v1);
3684	}
3685
3686	status = ADD_STA_SUCCESS;
3687	if (cmd_flags & CMD_ASYNC)
3688		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3689					   &u.cmd);
3690	else
3691		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3692						  &u.cmd, &status);
3693
3694	switch (status) {
3695	case ADD_STA_SUCCESS:
3696		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3697		break;
3698	default:
3699		ret = -EIO;
3700		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3701		break;
3702	}
3703
3704	return ret;
3705}
3706
3707static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3708				 struct ieee80211_key_conf *keyconf,
3709				 u8 sta_id, bool remove_key)
3710{
3711	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3712
3713	/* verify the key details match the required command's expectations */
3714	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3715		    (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3716		     keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3717		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3718		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3719		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3720		return -EINVAL;
3721
3722	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3723		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3724		return -EINVAL;
3725
3726	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3727	igtk_cmd.sta_id = cpu_to_le32(sta_id);
3728
3729	if (remove_key) {
3730		/* This is a valid situation for IGTK */
3731		if (sta_id == IWL_INVALID_STA)
3732			return 0;
3733
3734		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3735	} else {
3736		struct ieee80211_key_seq seq;
3737		const u8 *pn;
3738
3739		switch (keyconf->cipher) {
3740		case WLAN_CIPHER_SUITE_AES_CMAC:
3741			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3742			break;
3743		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3744		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3745			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3746			break;
3747		default:
3748			return -EINVAL;
3749		}
3750
3751		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3752		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3753			igtk_cmd.ctrl_flags |=
3754				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3755		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3756		pn = seq.aes_cmac.pn;
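		/*
		 * mac80211 stores the IPN with the most significant byte
		 * first (pn[0]), so assemble the 48-bit counter value byte
		 * by byte before converting it for the firmware.
		 */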
3757		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3758						       ((u64) pn[4] << 8) |
3759						       ((u64) pn[3] << 16) |
3760						       ((u64) pn[2] << 24) |
3761						       ((u64) pn[1] << 32) |
3762						       ((u64) pn[0] << 40));
3763	}
3764
3765	IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3766		       remove_key ? "removing" : "installing",
3767		       keyconf->keyidx >= 6 ? "B" : "",
3768		       keyconf->keyidx, igtk_cmd.sta_id);
3769
3770	if (!iwl_mvm_has_new_rx_api(mvm)) {
3771		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3772			.ctrl_flags = igtk_cmd.ctrl_flags,
3773			.key_id = igtk_cmd.key_id,
3774			.sta_id = igtk_cmd.sta_id,
3775			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
3776		};
3777
3778		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3779		       ARRAY_SIZE(igtk_cmd_v1.igtk));
3780		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3781					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3782	}
3783	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3784				    sizeof(igtk_cmd), &igtk_cmd);
3785}
3786
3787
3788static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3789				       struct ieee80211_vif *vif,
3790				       struct ieee80211_sta *sta)
3791{
3792	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3793
3794	if (sta)
3795		return sta->addr;
3796
3797	if (vif->type == NL80211_IFTYPE_STATION &&
3798	    mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
3799		u8 sta_id = mvmvif->deflink.ap_sta_id;
3800		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3801						lockdep_is_held(&mvm->mutex));
3802		if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
3803			return NULL;
3804
3805		return sta->addr;
3806	}
3807
3808
3809	return NULL;
3810}
3811
3812static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3813				 struct ieee80211_vif *vif,
3814				 struct ieee80211_sta *sta,
3815				 struct ieee80211_key_conf *keyconf,
3816				 u8 key_offset,
3817				 bool mcast)
3818{
3819	const u8 *addr;
3820	struct ieee80211_key_seq seq;
3821	u16 p1k[5];
3822	u32 sta_id;
3823	bool mfp = false;
3824
3825	if (sta) {
3826		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3827
3828		sta_id = mvm_sta->deflink.sta_id;
3829		mfp = sta->mfp;
3830	} else if (vif->type == NL80211_IFTYPE_AP &&
3831		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3832		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3833
3834		sta_id = mvmvif->deflink.mcast_sta.sta_id;
3835	} else {
3836		IWL_ERR(mvm, "Failed to find station id\n");
3837		return -EINVAL;
3838	}
3839
3840	if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
3841		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3842		if (!addr) {
3843			IWL_ERR(mvm, "Failed to find mac address\n");
3844			return -EINVAL;
3845		}
3846
3847		/* get phase 1 key from mac80211 */
3848		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3849		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3850
3851		return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3852					    seq.tkip.iv32, p1k, 0, key_offset,
3853					    mfp);
3854	}
3855
3856	return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3857				    0, NULL, 0, key_offset, mfp);
3858}
3859
3860int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3861			struct ieee80211_vif *vif,
3862			struct ieee80211_sta *sta,
3863			struct ieee80211_key_conf *keyconf,
3864			u8 key_offset)
3865{
3866	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3867	struct iwl_mvm_sta *mvm_sta;
3868	u8 sta_id = IWL_INVALID_STA;
3869	int ret;
3870	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3871
3872	lockdep_assert_held(&mvm->mutex);
3873
3874	if (vif->type != NL80211_IFTYPE_AP ||
3875	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3876		/* Get the station id from the mvm local station table */
3877		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3878		if (!mvm_sta) {
3879			IWL_ERR(mvm, "Failed to find station\n");
3880			return -EINVAL;
3881		}
3882		sta_id = mvm_sta->deflink.sta_id;
3883
3884		/*
3885		 * It is possible that the 'sta' parameter is NULL, and thus
3886		 * there is a need to retrieve the sta from the local station
3887		 * table.
3888		 */
3889		if (!sta) {
3890			sta = rcu_dereference_protected(
3891				mvm->fw_id_to_mac_id[sta_id],
3892				lockdep_is_held(&mvm->mutex));
3893			if (IS_ERR_OR_NULL(sta)) {
3894				IWL_ERR(mvm, "Invalid station id\n");
3895				return -EINVAL;
3896			}
3897		}
3898
3899		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3900			return -EINVAL;
3901	} else {
3902		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3903
3904		sta_id = mvmvif->deflink.mcast_sta.sta_id;
3905	}
3906
3907	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3908	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3909	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3910		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3911		goto end;
3912	}
3913
3914	/* If the key_offset is not pre-assigned, we need to find a
3915	 * new offset to use.  In normal cases, the offset is not
3916	 * pre-assigned, but during HW_RESTART we want to reuse the
3917	 * same indices, so we pass them when this function is called.
3918	 *
3919	 * In D3 entry, we need to hardcode the indices (because the
3920	 * firmware hardcodes the PTK offset to 0).  In this case, we
3921	 * need to make sure we don't overwrite the hw_key_idx in the
3922	 * keyconf structure, because otherwise we cannot configure
3923	 * the original ones back when resuming.
3924	 */
3925	if (key_offset == STA_KEY_IDX_INVALID) {
3926		key_offset  = iwl_mvm_set_fw_key_idx(mvm);
3927		if (key_offset == STA_KEY_IDX_INVALID)
3928			return -ENOSPC;
3929		keyconf->hw_key_idx = key_offset;
3930	}
3931
3932	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3933	if (ret)
3934		goto end;
3935
3936	/*
3937	 * For WEP, the same key is used for multicast and unicast. Upload it
3938	 * again, using the same key offset, and now pointing the other one
3939	 * to the same key slot (offset).
3940	 * If this fails, remove the original as well.
3941	 */
3942	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3943	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3944	    sta) {
3945		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3946					    key_offset, !mcast);
3947		if (ret) {
3948			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3949			goto end;
3950		}
3951	}
3952
3953	__set_bit(key_offset, mvm->fw_key_table);
3954
3955end:
3956	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3957		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3958		      sta ? sta->addr : zero_addr, ret);
3959	return ret;
3960}
3961
3962int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3963			   struct ieee80211_vif *vif,
3964			   struct ieee80211_sta *sta,
3965			   struct ieee80211_key_conf *keyconf)
3966{
3967	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3968	struct iwl_mvm_sta *mvm_sta;
3969	u8 sta_id = IWL_INVALID_STA;
3970	int ret, i;
3971
3972	lockdep_assert_held(&mvm->mutex);
3973
3974	/* Get the station from the mvm local station table */
3975	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3976	if (mvm_sta)
3977		sta_id = mvm_sta->deflink.sta_id;
3978	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3979		sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id;
3980
3981
3982	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3983		      keyconf->keyidx, sta_id);
3984
3985	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3986	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3987	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3988		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3989
3990	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3991		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3992			keyconf->hw_key_idx);
3993		return -ENOENT;
3994	}
3995
3996	/* track which key was deleted last */
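	/* (this feeds the "deleted longest ago" pick in iwl_mvm_set_fw_key_idx()) */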
3997	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3998		if (mvm->fw_key_deleted[i] < U8_MAX)
3999			mvm->fw_key_deleted[i]++;
4000	}
4001	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
4002
4003	if (sta && !mvm_sta) {
4004		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
4005		return 0;
4006	}
4007
4008	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
4009	if (ret)
4010		return ret;
4011
4012	/* delete WEP key twice to get rid of (now useless) offset */
4013	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
4014	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
4015		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
4016
4017	return ret;
4018}
4019
4020void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
4021			     struct ieee80211_vif *vif,
4022			     struct ieee80211_key_conf *keyconf,
4023			     struct ieee80211_sta *sta, u32 iv32,
4024			     u16 *phase1key)
4025{
4026	struct iwl_mvm_sta *mvm_sta;
4027	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
4028	bool mfp = sta ? sta->mfp : false;
4029
4030	rcu_read_lock();
4031
4032	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
4033	if (WARN_ON_ONCE(!mvm_sta))
4034		goto unlock;
4035	iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast,
4036			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
4037			     mfp);
4038
4039 unlock:
4040	rcu_read_unlock();
4041}
4042
4043void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
4044				struct ieee80211_sta *sta)
4045{
4046	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4047	struct iwl_mvm_add_sta_cmd cmd = {
4048		.add_modify = STA_MODE_MODIFY,
4049		.sta_id = mvmsta->deflink.sta_id,
4050		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
4051		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
4052	};
4053	int ret;
4054
4055	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4056				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4057	if (ret)
4058		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4059}
4060
4061void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
4062				       struct ieee80211_sta *sta,
4063				       enum ieee80211_frame_release_type reason,
4064				       u16 cnt, u16 tids, bool more_data,
4065				       bool single_sta_queue)
4066{
4067	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4068	struct iwl_mvm_add_sta_cmd cmd = {
4069		.add_modify = STA_MODE_MODIFY,
4070		.sta_id = mvmsta->deflink.sta_id,
4071		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
4072		.sleep_tx_count = cpu_to_le16(cnt),
4073		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
4074	};
4075	int tid, ret;
4076	unsigned long _tids = tids;
4077
4078	/* convert TIDs to ACs - we don't support TSPEC, so that's OK.
4079	 * Note that this field is reserved and unused by firmware not
4080	 * supporting GO uAPSD, so it's safe to always do this.
4081	 */
4082	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
4083		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
4084
4085	/* If we're releasing frames from aggregation or dqa queues then check
4086	 * if all the queues that we're releasing frames from, combined, have:
4087	 *  - more frames than the service period, in which case more_data
4088	 *    needs to be set
4089	 *  - fewer than 'cnt' frames, in which case we need to adjust the
4090	 *    count in the firmware command (we do that unconditionally)
4091	 */
4092	if (single_sta_queue) {
4093		int remaining = cnt;
4094		int sleep_tx_count;
4095
4096		spin_lock_bh(&mvmsta->lock);
4097		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
4098			struct iwl_mvm_tid_data *tid_data;
4099			u16 n_queued;
4100
4101			tid_data = &mvmsta->tid_data[tid];
4102
4103			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
4104			if (n_queued > remaining) {
4105				more_data = true;
4106				remaining = 0;
4107				break;
4108			}
4109			remaining -= n_queued;
4110		}
4111		sleep_tx_count = cnt - remaining;
4112		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
4113			mvmsta->sleep_tx_count = sleep_tx_count;
4114		spin_unlock_bh(&mvmsta->lock);
4115
4116		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
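		/* nothing was actually queued - just complete the SP now */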
4117		if (WARN_ON(cnt - remaining == 0)) {
4118			ieee80211_sta_eosp(sta);
4119			return;
4120		}
4121	}
4122
4123	/* Note: this is ignored by firmware not supporting GO uAPSD */
4124	if (more_data)
4125		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
4126
4127	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
4128		mvmsta->next_status_eosp = true;
4129		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
4130	} else {
4131		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
4132	}
4133
4134	/* block the Tx queues until the FW has updated the sleep Tx count */
4135	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
4136				   CMD_ASYNC | CMD_BLOCK_TXQS,
4137				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4138	if (ret)
4139		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4140}
4141
4142void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
4143			   struct iwl_rx_cmd_buffer *rxb)
4144{
4145	struct iwl_rx_packet *pkt = rxb_addr(rxb);
4146	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
4147	struct ieee80211_sta *sta;
4148	u32 sta_id = le32_to_cpu(notif->sta_id);
4149
4150	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
4151		return;
4152
4153	rcu_read_lock();
4154	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
4155	if (!IS_ERR_OR_NULL(sta))
4156		ieee80211_sta_eosp(sta);
4157	rcu_read_unlock();
4158}
4159
4160void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
4161				   struct iwl_mvm_sta *mvmsta,
4162				   bool disable)
4163{
4164	struct iwl_mvm_add_sta_cmd cmd = {
4165		.add_modify = STA_MODE_MODIFY,
4166		.sta_id = mvmsta->deflink.sta_id,
4167		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4168		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4169		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
4170	};
4171	int ret;
4172
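	/* with MLD firmware the legacy ADD_STA path below is never used:
	 * either the firmware blocks TX on its own, or the MLD variant
	 * of the command does it
	 */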
4173	if (mvm->mld_api_is_used) {
4174		if (!iwl_mvm_has_no_host_disable_tx(mvm))
4175			iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
4176		return;
4177	}
4178
4179	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4180				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4181	if (ret)
4182		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4183}
4184
4185void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
4186				      struct ieee80211_sta *sta,
4187				      bool disable)
4188{
4189	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4190
4191	if (mvm->mld_api_is_used) {
4192		if (!iwl_mvm_has_no_host_disable_tx(mvm))
4193			iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
4194		return;
4195	}
4196
4197	spin_lock_bh(&mvm_sta->lock);
4198
4199	if (mvm_sta->disable_tx == disable) {
4200		spin_unlock_bh(&mvm_sta->lock);
4201		return;
4202	}
4203
4204	mvm_sta->disable_tx = disable;
4205
4206	/*
4207	 * If sta PS state is handled by mac80211, tell it to start/stop
4208	 * queuing tx for this station.
4209	 */
4210	if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
4211		ieee80211_sta_block_awake(mvm->hw, sta, disable);
4212
4213	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
4214
4215	spin_unlock_bh(&mvm_sta->lock);
4216}
4217
4218static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
4219					      struct iwl_mvm_vif *mvmvif,
4220					      struct iwl_mvm_int_sta *sta,
4221					      bool disable)
4222{
4223	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
4224	struct iwl_mvm_add_sta_cmd cmd = {
4225		.add_modify = STA_MODE_MODIFY,
4226		.sta_id = sta->sta_id,
4227		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4228		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4229		.mac_id_n_color = cpu_to_le32(id),
4230	};
4231	int ret;
4232
4233	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4234				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4235	if (ret)
4236		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4237}
4238
4239void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
4240				       struct iwl_mvm_vif *mvmvif,
4241				       bool disable)
4242{
4243	struct ieee80211_sta *sta;
4244	struct iwl_mvm_sta *mvm_sta;
4245	int i;
4246
4247	if (mvm->mld_api_is_used) {
4248		if (!iwl_mvm_has_no_host_disable_tx(mvm))
4249			iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif,
4250							      disable);
4251		return;
4252	}
4253
4254	rcu_read_lock();
4255
4256	/* Block/unblock all the stations of the given mvmvif */
4257	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
4258		sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
4259		if (IS_ERR_OR_NULL(sta))
4260			continue;
4261
4262		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4263		if (mvm_sta->mac_id_n_color !=
4264		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
4265			continue;
4266
4267		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
4268	}
4269
4270	rcu_read_unlock();
4271
4272	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
4273		return;
4274
4275	/* Need to block/unblock also multicast station */
4276	if (mvmvif->deflink.mcast_sta.sta_id != IWL_INVALID_STA)
4277		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4278						  &mvmvif->deflink.mcast_sta,
4279						  disable);
4280
4281	/*
4282	 * Only unblock the broadcast station (FW blocks it for immediate
4283	 * quiet, not the driver)
4284	 */
4285	if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_INVALID_STA)
4286		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4287						  &mvmvif->deflink.bcast_sta,
4288						  disable);
4289}
4290
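/* Block TX to the AP station during CSA, until the switch completes */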
4291void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4292{
4293	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4294	struct iwl_mvm_sta *mvmsta;
4295
4296	rcu_read_lock();
4297
4298	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id);
4299
4300	if (mvmsta)
4301		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
4302
4303	rcu_read_unlock();
4304}
4305
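/*
 * Return how many frames are still queued for this TID: the distance
 * between the next sequence number the driver will use and the next
 * frame the firmware is expected to reclaim.
 */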
4306u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
4307{
4308	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
4309
4310	/*
4311	 * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we need
4312	 * to align the wrap-around of the SSN to compare the relevant values.
4313	 */
4314	if (mvm->trans->trans_cfg->gen2)
4315		sn &= 0xff;
4316
4317	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
4318}
4319
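/*
 * Add an internal station used for secure (PASN) ranging with the given
 * peer address, and install its transient key.
 */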
4320int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4321			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
4322			 u8 *key, u32 key_len,
4323			 struct ieee80211_key_conf *keyconf)
4324{
4325	int ret;
4326	u16 queue;
4327	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4328	unsigned int wdg_timeout =
4329		iwl_mvm_get_wd_timeout(mvm, vif);
4330	bool mld = iwl_mvm_has_mld_api(mvm->fw);
4331	u32 type = IWL_STA_LINK;
4332
4333	if (mld)
4334		type = STATION_TYPE_PEER;
4335
4336	ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
4337				       NL80211_IFTYPE_UNSPECIFIED, type);
4338	if (ret)
4339		return ret;
4340
4341	if (mld)
4342		ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr,
4343							 mvmvif->deflink.fw_link_id,
4344							 &queue,
4345							 IWL_MAX_TID_COUNT,
4346							 &wdg_timeout);
4347	else
4348		ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id,
4349						     mvmvif->color, addr, sta,
4350						     &queue,
4351						     IWL_MVM_TX_FIFO_BE);
4352	if (ret)
4353		goto out;
4354
4355	keyconf->cipher = cipher;
4356	memcpy(keyconf->key, key, key_len);
4357	keyconf->keylen = key_len;
4358	keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE;
4359
4360	if (mld) {
4361		/* The MFP flag is set according to the station mfp field. Since
4362		 * we don't have a station, set it manually.
4363		 */
4364		u32 key_flags =
4365			iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
4366			IWL_SEC_KEY_FLAG_MFP;
4367		u32 sta_mask = BIT(sta->sta_id);
4368
4369		ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf);
4370	} else {
4371		ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
4372					   0, NULL, 0, 0, true);
4373	}
4374
4375out:
4376	if (ret)
4377		iwl_mvm_dealloc_int_sta(mvm, sta);
4378	return ret;
4379}
4380
4381void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
4382				   struct ieee80211_vif *vif,
4383				   u32 id)
4384{
4385	struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
4386		.id = cpu_to_le32(id),
4387	};
4388	int ret;
4389
4390	ret = iwl_mvm_send_cmd_pdu(mvm,
4391				   WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
4392				   CMD_ASYNC,
4393				   sizeof(cancel_channel_switch_cmd),
4394				   &cancel_channel_switch_cmd);
4395	if (ret)
4396		IWL_ERR(mvm, "Failed to cancel the channel switch\n");
4397}
4398
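/* Translate a firmware station ID to the firmware link ID it belongs to */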
4399static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
4400					   u8 fw_sta_id)
4401{
4402	struct ieee80211_link_sta *link_sta =
4403		rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
4404	struct iwl_mvm_vif_link_info *link;
4405
4406	if (WARN_ON_ONCE(!link_sta))
4407		return -EINVAL;
4408
4409	link = mvmvif->link[link_sta->link_id];
4410
4411	if (WARN_ON_ONCE(!link))
4412		return -EINVAL;
4413
4414	return link->fw_link_id;
4415}
4416
4417#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)
4418
4419void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
4420			bool tx, int queue)
4421{
4422	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
4423	struct iwl_mvm *mvm = mvmvif->mvm;
4424	struct iwl_mvm_tpt_counter *queue_counter;
4425	struct iwl_mvm_mpdu_counter *link_counter;
4426	u32 total_mpdus = 0;
4427	int fw_link_id;
4428
4429	/* Count only for a BSS sta, and only when EMLSR is possible */
4430	if (!mvm_sta->mpdu_counters)
4431		return;
4432
4433	/* Map sta id to link id */
4434	fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
4435	if (fw_link_id < 0)
4436		return;
4437
4438	queue_counter = &mvm_sta->mpdu_counters[queue];
4439	link_counter = &queue_counter->per_link[fw_link_id];
4440
4441	spin_lock_bh(&queue_counter->lock);
4442
4443	if (tx)
4444		link_counter->tx += count;
4445	else
4446		link_counter->rx += count;
4447
4448	/*
4449	 * When not in EMLSR, the window and the decision to enter EMLSR are
4450	 * handled here during counting; when in EMLSR, in the statistics flow
4451	 */
4452	if (mvmvif->esr_active)
4453		goto out;
4454
4455	if (time_is_before_jiffies(queue_counter->window_start +
4456					IWL_MVM_TPT_COUNT_WINDOW)) {
4457		memset(queue_counter->per_link, 0,
4458		       sizeof(queue_counter->per_link));
4459		queue_counter->window_start = jiffies;
4460
4461		IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
4462	}
4463
4464	for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)
4465		total_mpdus += tx ? queue_counter->per_link[i].tx :
4466				    queue_counter->per_link[i].rx;
4467
4468	if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
4469		wiphy_work_queue(mvmvif->mvm->hw->wiphy,
4470				 &mvmvif->unblock_esr_tpt_wk);
4471
4472out:
4473	spin_unlock_bh(&queue_counter->lock);
4474}
v4.17
   1/******************************************************************************
   2 *
   3 * This file is provided under a dual BSD/GPLv2 license.  When using or
   4 * redistributing this file, you may do so under either license.
   5 *
   6 * GPL LICENSE SUMMARY
   7 *
   8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
   9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of version 2 of the GNU General Public License as
  14 * published by the Free Software Foundation.
  15 *
  16 * This program is distributed in the hope that it will be useful, but
  17 * WITHOUT ANY WARRANTY; without even the implied warranty of
  18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  19 * General Public License for more details.
  20 *
  21 * You should have received a copy of the GNU General Public License
  22 * along with this program; if not, write to the Free Software
  23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  24 * USA
  25 *
  26 * The full GNU General Public License is included in this distribution
  27 * in the file called COPYING.
  28 *
  29 * Contact Information:
  30 *  Intel Linux Wireless <linuxwifi@intel.com>
  31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  32 *
  33 * BSD LICENSE
  34 *
  35 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  38 * All rights reserved.
  39 *
  40 * Redistribution and use in source and binary forms, with or without
  41 * modification, are permitted provided that the following conditions
  42 * are met:
  43 *
  44 *  * Redistributions of source code must retain the above copyright
  45 *    notice, this list of conditions and the following disclaimer.
  46 *  * Redistributions in binary form must reproduce the above copyright
  47 *    notice, this list of conditions and the following disclaimer in
  48 *    the documentation and/or other materials provided with the
  49 *    distribution.
  50 *  * Neither the name Intel Corporation nor the names of its
  51 *    contributors may be used to endorse or promote products derived
  52 *    from this software without specific prior written permission.
  53 *
  54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  65 *
  66 *****************************************************************************/
  67#include <net/mac80211.h>
  68
  69#include "mvm.h"
  70#include "sta.h"
  71#include "rs.h"
  72
  73/*
  74 * New version of ADD_STA_sta command added new fields at the end of the
  75 * structure, so sending the size of the relevant API's structure is enough to
  76 * support both API versions.
  77 */
  78static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
  79{
  80	if (iwl_mvm_has_new_rx_api(mvm) ||
  81	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
  82		return sizeof(struct iwl_mvm_add_sta_cmd);
  83	else
  84		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
  85}
  86
  87static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
  88				    enum nl80211_iftype iftype)
  89{
  90	int sta_id;
  91	u32 reserved_ids = 0;
  92
  93	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
  94	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
  95
  96	lockdep_assert_held(&mvm->mutex);
  97
  98	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
  99	if (iftype != NL80211_IFTYPE_STATION)
 100		reserved_ids = BIT(0);
 101
 102	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
 103	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
 104		if (BIT(sta_id) & reserved_ids)
 105			continue;
 106
 107		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 108					       lockdep_is_held(&mvm->mutex)))
 109			return sta_id;
 110	}
 111	return IWL_MVM_INVALID_STA;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 112}
 113
 114/* send station add/update command to firmware */
 115int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 116			   bool update, unsigned int flags)
 117{
 118	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 119	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
 120		.sta_id = mvm_sta->sta_id,
 121		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
 122		.add_modify = update ? 1 : 0,
 123		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
 124						 STA_FLG_MIMO_EN_MSK |
 125						 STA_FLG_RTS_MIMO_PROT),
 126		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
 127	};
 128	int ret;
 129	u32 status;
 130	u32 agg_size = 0, mpdu_dens = 0;
 131
 132	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
 133		add_sta_cmd.station_type = mvm_sta->sta_type;
 134
 135	if (!update || (flags & STA_MODIFY_QUEUES)) {
 136		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
 137
 138		if (!iwl_mvm_has_new_tx_api(mvm)) {
 139			add_sta_cmd.tfd_queue_msk =
 140				cpu_to_le32(mvm_sta->tfd_queue_msk);
 141
 142			if (flags & STA_MODIFY_QUEUES)
 143				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
 144		} else {
 145			WARN_ON(flags & STA_MODIFY_QUEUES);
 146		}
 147	}
 148
 149	switch (sta->bandwidth) {
 
 150	case IEEE80211_STA_RX_BW_160:
 151		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
 152		/* fall through */
 153	case IEEE80211_STA_RX_BW_80:
 154		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
 155		/* fall through */
 156	case IEEE80211_STA_RX_BW_40:
 157		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
 158		/* fall through */
 159	case IEEE80211_STA_RX_BW_20:
 160		if (sta->ht_cap.ht_supported)
 161			add_sta_cmd.station_flags |=
 162				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
 163		break;
 164	}
 165
 166	switch (sta->rx_nss) {
 167	case 1:
 168		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
 169		break;
 170	case 2:
 171		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
 172		break;
 173	case 3 ... 8:
 174		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
 175		break;
 176	}
 177
 178	switch (sta->smps_mode) {
 179	case IEEE80211_SMPS_AUTOMATIC:
 180	case IEEE80211_SMPS_NUM_MODES:
 181		WARN_ON(1);
 182		break;
 183	case IEEE80211_SMPS_STATIC:
 184		/* override NSS */
 185		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
 186		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
 187		break;
 188	case IEEE80211_SMPS_DYNAMIC:
 189		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
 190		break;
 191	case IEEE80211_SMPS_OFF:
 192		/* nothing */
 193		break;
 194	}
 195
 196	if (sta->ht_cap.ht_supported) {
 
 197		add_sta_cmd.station_flags_msk |=
 198			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
 199				    STA_FLG_AGG_MPDU_DENS_MSK);
 200
 201		mpdu_dens = sta->ht_cap.ampdu_density;
 202	}
 203
 204	if (sta->vht_cap.vht_supported) {
 205		agg_size = sta->vht_cap.cap &
 206			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
 207		agg_size >>=
 208			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
 209	} else if (sta->ht_cap.ht_supported) {
 210		agg_size = sta->ht_cap.ampdu_factor;
 211	}
 212
 213	add_sta_cmd.station_flags |=
 214		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
 215	add_sta_cmd.station_flags |=
 216		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
 217	if (mvm_sta->associated)
 
 218		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
 219
 220	if (sta->wme) {
 221		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
 222
 223		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
 224			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
 225		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
 226			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
 227		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
 228			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
 229		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
 230			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
 231		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
 232		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
 233	}
 234
 235	status = ADD_STA_SUCCESS;
 236	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 237					  iwl_mvm_add_sta_cmd_size(mvm),
 238					  &add_sta_cmd, &status);
 239	if (ret)
 240		return ret;
 241
 242	switch (status & IWL_ADD_STA_STATUS_MASK) {
 243	case ADD_STA_SUCCESS:
 244		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
 245		break;
 246	default:
 247		ret = -EIO;
 248		IWL_ERR(mvm, "ADD_STA failed\n");
 249		break;
 250	}
 251
 252	return ret;
 253}
 254
 255static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
 256{
 257	struct iwl_mvm_baid_data *data =
 258		from_timer(data, t, session_timer);
 259	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
 260	struct iwl_mvm_baid_data *ba_data;
 261	struct ieee80211_sta *sta;
 262	struct iwl_mvm_sta *mvm_sta;
 263	unsigned long timeout;
 
 264
 265	rcu_read_lock();
 266
 267	ba_data = rcu_dereference(*rcu_ptr);
 268
 269	if (WARN_ON(!ba_data))
 270		goto unlock;
 271
 272	if (!ba_data->timeout)
 273		goto unlock;
 274
 275	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
 276	if (time_is_after_jiffies(timeout)) {
 277		mod_timer(&ba_data->session_timer, timeout);
 278		goto unlock;
 279	}
 280
 281	/* Timer expired */
 282	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
 
 283
 284	/*
 285	 * sta should be valid unless the following happens:
 286	 * The firmware asserts which triggers a reconfig flow, but
 287	 * the reconfig fails before we set the pointer to sta into
 288	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
 289	 * A-MDPU and hence the timer continues to run. Then, the
 290	 * timer expires and sta is NULL.
 291	 */
 292	if (!sta)
 293		goto unlock;
 294
 295	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 296	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
 297				      sta->addr, ba_data->tid);
 298unlock:
 299	rcu_read_unlock();
 300}
 301
 302/* Disable aggregations for a bitmap of TIDs for a given station */
 303static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
 304					unsigned long disable_agg_tids,
 305					bool remove_queue)
 306{
 307	struct iwl_mvm_add_sta_cmd cmd = {};
 308	struct ieee80211_sta *sta;
 309	struct iwl_mvm_sta *mvmsta;
 310	u32 status;
 311	u8 sta_id;
 312	int ret;
 313
 314	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 315		return -EINVAL;
 316
 317	spin_lock_bh(&mvm->queue_info_lock);
 318	sta_id = mvm->queue_info[queue].ra_sta_id;
 319	spin_unlock_bh(&mvm->queue_info_lock);
 320
 321	rcu_read_lock();
 322
 323	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 324
 325	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
 326		rcu_read_unlock();
 327		return -EINVAL;
 328	}
 329
 330	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 331
 332	mvmsta->tid_disable_agg |= disable_agg_tids;
 333
 334	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
 335	cmd.sta_id = mvmsta->sta_id;
 336	cmd.add_modify = STA_MODE_MODIFY;
 337	cmd.modify_mask = STA_MODIFY_QUEUES;
 338	if (disable_agg_tids)
 339		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
 340	if (remove_queue)
 341		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
 342	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
 343	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
 344
 345	rcu_read_unlock();
 346
 347	/* Notify FW of queue removal from the STA queues */
 348	status = ADD_STA_SUCCESS;
 349	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 350					  iwl_mvm_add_sta_cmd_size(mvm),
 351					  &cmd, &status);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 352
 
 
 
 353	return ret;
 354}
 355
 356static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
 357{
 358	struct ieee80211_sta *sta;
 359	struct iwl_mvm_sta *mvmsta;
 360	unsigned long tid_bitmap;
 361	unsigned long agg_tids = 0;
 362	u8 sta_id;
 363	int tid;
 364
 365	lockdep_assert_held(&mvm->mutex);
 366
 367	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 368		return -EINVAL;
 369
 370	spin_lock_bh(&mvm->queue_info_lock);
 371	sta_id = mvm->queue_info[queue].ra_sta_id;
 372	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 373	spin_unlock_bh(&mvm->queue_info_lock);
 374
 375	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 376					lockdep_is_held(&mvm->mutex));
 377
 378	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
 379		return -EINVAL;
 380
 381	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 382
 383	spin_lock_bh(&mvmsta->lock);
 384	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
 385		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
 386			agg_tids |= BIT(tid);
 387	}
 388	spin_unlock_bh(&mvmsta->lock);
 389
 390	return agg_tids;
 391}
 392
 393/*
 394 * Remove a queue from a station's resources.
 395 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 396 * doesn't disable the queue
 397 */
 398static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 399{
 400	struct ieee80211_sta *sta;
 401	struct iwl_mvm_sta *mvmsta;
 402	unsigned long tid_bitmap;
 403	unsigned long disable_agg_tids = 0;
 404	u8 sta_id;
 405	int tid;
 406
 407	lockdep_assert_held(&mvm->mutex);
 408
 409	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 410		return -EINVAL;
 411
 412	spin_lock_bh(&mvm->queue_info_lock);
 413	sta_id = mvm->queue_info[queue].ra_sta_id;
 414	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 415	spin_unlock_bh(&mvm->queue_info_lock);
 416
 417	rcu_read_lock();
 418
 419	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 420
 421	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
 422		rcu_read_unlock();
 423		return 0;
 424	}
 425
 426	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 427
 428	spin_lock_bh(&mvmsta->lock);
 429	/* Unmap MAC queues and TIDs from this queue */
 430	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
 
 
 
 431		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
 432			disable_agg_tids |= BIT(tid);
 433		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
 
 
 
 
 
 
 434	}
 435
 436	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
 437	spin_unlock_bh(&mvmsta->lock);
 438
 439	rcu_read_unlock();
 440
 
 
 
 
 
 
 
 
 
 
 441	return disable_agg_tids;
 442}
 443
 444static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
 445				       bool same_sta)
 
 446{
 447	struct iwl_mvm_sta *mvmsta;
 448	u8 txq_curr_ac, sta_id, tid;
 449	unsigned long disable_agg_tids = 0;
 
 
 450	int ret;
 451
 452	lockdep_assert_held(&mvm->mutex);
 453
 454	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 455		return -EINVAL;
 456
 457	spin_lock_bh(&mvm->queue_info_lock);
 458	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
 459	sta_id = mvm->queue_info[queue].ra_sta_id;
 460	tid = mvm->queue_info[queue].txq_tid;
 461	spin_unlock_bh(&mvm->queue_info_lock);
 
 462
 463	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
 464	if (WARN_ON(!mvmsta))
 465		return -EINVAL;
 466
 467	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
 468	/* Disable the queue */
 469	if (disable_agg_tids)
 470		iwl_mvm_invalidate_sta_queue(mvm, queue,
 471					     disable_agg_tids, false);
 472
 473	ret = iwl_mvm_disable_txq(mvm, queue,
 474				  mvmsta->vif->hw_queue[txq_curr_ac],
 475				  tid, 0);
 476	if (ret) {
 477		/* Re-mark the inactive queue as inactive */
 478		spin_lock_bh(&mvm->queue_info_lock);
 479		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
 480		spin_unlock_bh(&mvm->queue_info_lock);
 481		IWL_ERR(mvm,
 482			"Failed to free inactive queue %d (ret=%d)\n",
 483			queue, ret);
 484
 485		return ret;
 486	}
 487
 488	/* If TXQ is allocated to another STA, update removal in FW */
 489	if (!same_sta)
 490		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
 491
 492	return 0;
 493}
 494
 495static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 496				    unsigned long tfd_queue_mask, u8 ac)
 497{
 498	int queue = 0;
 499	u8 ac_to_queue[IEEE80211_NUM_ACS];
 500	int i;
 501
 502	lockdep_assert_held(&mvm->queue_info_lock);
 
 
 
 
 
 503	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 504		return -EINVAL;
 505
 506	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
 507
 508	/* See what ACs the existing queues for this STA have */
 509	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
 510		/* Only DATA queues can be shared */
 511		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
 512		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
 513			continue;
 514
 515		/* Don't try and take queues being reconfigured */
 516		if (mvm->queue_info[queue].status ==
 517		    IWL_MVM_QUEUE_RECONFIGURING)
 518			continue;
 519
 520		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
 521	}
 522
 523	/*
 524	 * The queue to share is chosen only from DATA queues as follows (in
 525	 * descending priority):
 526	 * 1. An AC_BE queue
 527	 * 2. Same AC queue
 528	 * 3. Highest AC queue that is lower than new AC
 529	 * 4. Any existing AC (there always is at least 1 DATA queue)
 530	 */
 531
 532	/* Priority 1: An AC_BE queue */
 533	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
 534		queue = ac_to_queue[IEEE80211_AC_BE];
 535	/* Priority 2: Same AC queue */
 536	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
 537		queue = ac_to_queue[ac];
 538	/* Priority 3a: If new AC is VO and VI exists - use VI */
 539	else if (ac == IEEE80211_AC_VO &&
 540		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
 541		queue = ac_to_queue[IEEE80211_AC_VI];
 542	/* Priority 3b: No BE so only AC less than the new one is BK */
 543	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
 544		queue = ac_to_queue[IEEE80211_AC_BK];
 545	/* Priority 4a: No BE nor BK - use VI if exists */
 546	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
 547		queue = ac_to_queue[IEEE80211_AC_VI];
 548	/* Priority 4b: No BE, BK nor VI - use VO if exists */
 549	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
 550		queue = ac_to_queue[IEEE80211_AC_VO];
 551
 552	/* Make sure queue found (or not) is legal */
 553	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
 554	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
 555	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
 556		IWL_ERR(mvm, "No DATA queues available to share\n");
 557		return -ENOSPC;
 558	}
 559
 560	/* Make sure the queue isn't in the middle of being reconfigured */
 561	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
 562		IWL_ERR(mvm,
 563			"TXQ %d is in the middle of re-config - try again\n",
 564			queue);
 565		return -EBUSY;
 566	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 567
 568	return queue;
 569}
 570
 571/*
 572 * If a given queue has a higher AC than the TID stream that is being compared
 573 * to, the queue needs to be redirected to the lower AC. This function does that
 574 * in such a case, otherwise - if no redirection required - it does nothing,
 575 * unless the %force param is true.
 576 */
 577int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 578			       int ac, int ssn, unsigned int wdg_timeout,
 579			       bool force)
 580{
 581	struct iwl_scd_txq_cfg_cmd cmd = {
 582		.scd_queue = queue,
 583		.action = SCD_CFG_DISABLE_QUEUE,
 584	};
 585	bool shared_queue;
 586	unsigned long mq;
 587	int ret;
 588
 589	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 590		return -EINVAL;
 591
 592	/*
 593	 * If the AC is lower than current one - FIFO needs to be redirected to
 594	 * the lowest one of the streams in the queue. Check if this is needed
 595	 * here.
 596	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
 597	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
 598	 * we need to check if the numerical value of X is LARGER than of Y.
 599	 */
 600	spin_lock_bh(&mvm->queue_info_lock);
 601	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
 602		spin_unlock_bh(&mvm->queue_info_lock);
 603
 604		IWL_DEBUG_TX_QUEUES(mvm,
 605				    "No redirection needed on TXQ #%d\n",
 606				    queue);
 607		return 0;
 608	}
 609
 610	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
 611	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
 612	cmd.tid = mvm->queue_info[queue].txq_tid;
 613	mq = mvm->hw_queue_to_mac80211[queue];
 614	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
 615	spin_unlock_bh(&mvm->queue_info_lock);
 616
 617	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
 618			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
 619
 620	/* Stop MAC queues and wait for this queue to empty */
 621	iwl_mvm_stop_mac_queues(mvm, mq);
 
 622	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
 623	if (ret) {
 624		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
 625			queue);
 626		ret = -EIO;
 627		goto out;
 628	}
 629
 630	/* Before redirecting the queue we need to de-activate it */
 631	iwl_trans_txq_disable(mvm->trans, queue, false);
 632	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
 633	if (ret)
 634		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
 635			ret);
 636
 637	/* Make sure the SCD wrptr is correctly set before reconfiguring */
 638	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
 639
 640	/* Update the TID "owner" of the queue */
 641	spin_lock_bh(&mvm->queue_info_lock);
 642	mvm->queue_info[queue].txq_tid = tid;
 643	spin_unlock_bh(&mvm->queue_info_lock);
 644
 645	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
 646
 647	/* Redirect to lower AC */
 648	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
 649			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
 650
 651	/* Update AC marking of the queue */
 652	spin_lock_bh(&mvm->queue_info_lock);
 653	mvm->queue_info[queue].mac80211_ac = ac;
 654	spin_unlock_bh(&mvm->queue_info_lock);
 655
 656	/*
 657	 * Mark queue as shared in transport if shared
 658	 * Note this has to be done after queue enablement because enablement
 659	 * can also set this value, and there is no indication there to shared
 660	 * queues
 661	 */
 662	if (shared_queue)
 663		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
 664
 665out:
 666	/* Continue using the MAC queues */
 667	iwl_mvm_start_mac_queues(mvm, mq);
 668
 669	return ret;
 670}
 671
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 672static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
 673					struct ieee80211_sta *sta, u8 ac,
 674					int tid)
 675{
 676	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
 
 677	unsigned int wdg_timeout =
 678		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
 679	u8 mac_queue = mvmsta->vif->hw_queue[ac];
 680	int queue = -1;
 681
 682	lockdep_assert_held(&mvm->mutex);
 683
 684	IWL_DEBUG_TX_QUEUES(mvm,
 685			    "Allocating queue for sta %d on tid %d\n",
 686			    mvmsta->sta_id, tid);
 687	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
 688					wdg_timeout);
 689	if (queue < 0)
 690		return queue;
 691
 
 
 
 
 692	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
 693
 694	spin_lock_bh(&mvmsta->lock);
 695	mvmsta->tid_data[tid].txq_id = queue;
 696	mvmsta->tid_data[tid].is_tid_active = true;
 697	spin_unlock_bh(&mvmsta->lock);
 698
 699	return 0;
 700}
 701
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 702static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 703				   struct ieee80211_sta *sta, u8 ac, int tid,
 704				   struct ieee80211_hdr *hdr)
 705{
 706	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 707	struct iwl_trans_txq_scd_cfg cfg = {
 708		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
 709		.sta_id = mvmsta->sta_id,
 710		.tid = tid,
 711		.frame_limit = IWL_FRAME_LIMIT,
 712	};
 713	unsigned int wdg_timeout =
 714		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
 715	u8 mac_queue = mvmsta->vif->hw_queue[ac];
 716	int queue = -1;
 717	bool using_inactive_queue = false, same_sta = false;
 718	unsigned long disable_agg_tids = 0;
 719	enum iwl_mvm_agg_state queue_state;
 720	bool shared_queue = false, inc_ssn;
 721	int ssn;
 722	unsigned long tfd_queue_mask;
 723	int ret;
 724
 725	lockdep_assert_held(&mvm->mutex);
 726
 727	if (iwl_mvm_has_new_tx_api(mvm))
 728		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
 729
 730	spin_lock_bh(&mvmsta->lock);
 731	tfd_queue_mask = mvmsta->tfd_queue_msk;
 
 732	spin_unlock_bh(&mvmsta->lock);
 733
 734	spin_lock_bh(&mvm->queue_info_lock);
 735
 736	/*
 737	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
 738	 * exists
 739	 */
 740	if (!ieee80211_is_data_qos(hdr->frame_control) ||
 741	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
 742		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 743						IWL_MVM_DQA_MIN_MGMT_QUEUE,
 744						IWL_MVM_DQA_MAX_MGMT_QUEUE);
 745		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
 746			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
 747					    queue);
 748
 749		/* If no such queue is found, we'll use a DATA queue instead */
 750	}
 751
 752	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
 753	    (mvm->queue_info[mvmsta->reserved_queue].status ==
 754	     IWL_MVM_QUEUE_RESERVED ||
 755	     mvm->queue_info[mvmsta->reserved_queue].status ==
 756	     IWL_MVM_QUEUE_INACTIVE)) {
 757		queue = mvmsta->reserved_queue;
 758		mvm->queue_info[queue].reserved = true;
 759		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
 760	}
 761
 762	if (queue < 0)
 763		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 764						IWL_MVM_DQA_MIN_DATA_QUEUE,
 765						IWL_MVM_DQA_MAX_DATA_QUEUE);
 766
 767	/*
 768	 * Check if this queue is already allocated but inactive.
 769	 * In such a case, we'll need to first free this queue before enabling
 770	 * it again, so we'll mark it as reserved to make sure no new traffic
 771	 * arrives on it
 772	 */
 773	if (queue > 0 &&
 774	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
 775		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
 776		using_inactive_queue = true;
 777		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
 778		IWL_DEBUG_TX_QUEUES(mvm,
 779				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
 780				    queue, mvmsta->sta_id, tid);
 781	}
 782
 783	/* No free queue - we'll have to share */
 784	if (queue <= 0) {
 785		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
 786		if (queue > 0) {
 787			shared_queue = true;
 788			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
 789		}
 790	}
 791
 792	/*
 793	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
 794	 * to make sure no one else takes it.
 795	 * This will allow avoiding re-acquiring the lock at the end of the
 796	 * configuration. On error we'll mark it back as free.
 797	 */
 798	if ((queue > 0) && !shared_queue)
 799		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
 800
 801	spin_unlock_bh(&mvm->queue_info_lock);
 802
 803	/* This shouldn't happen - out of queues */
 804	if (WARN_ON(queue <= 0)) {
 805		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
 806			tid, cfg.sta_id);
 807		return queue;
 808	}
 809
 810	/*
 811	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
 812	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
 813	 * as aggregatable.
 814	 * Mark all DATA queues as allowing to be aggregated at some point
 815	 */
 816	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
 817			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
 818
 819	/*
 820	 * If this queue was previously inactive (idle) - we need to free it
 821	 * first
 822	 */
 823	if (using_inactive_queue) {
 824		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
 825		if (ret)
 826			return ret;
 827	}
 828
 829	IWL_DEBUG_TX_QUEUES(mvm,
 830			    "Allocating %squeue #%d to sta %d on tid %d\n",
 831			    shared_queue ? "shared " : "", queue,
 832			    mvmsta->sta_id, tid);
 833
 834	if (shared_queue) {
 835		/* Disable any open aggs on this queue */
 836		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
 837
 838		if (disable_agg_tids) {
 839			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
 840					    queue);
 841			iwl_mvm_invalidate_sta_queue(mvm, queue,
 842						     disable_agg_tids, false);
 843		}
 844	}
 845
 846	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
 847	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
 848				     ssn, &cfg, wdg_timeout);
 849	if (inc_ssn) {
 850		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
 851		le16_add_cpu(&hdr->seq_ctrl, 0x10);
 852	}
 853
 854	/*
 855	 * Mark the queue as shared in the transport if needed.
 856	 * Note this has to be done after queue enablement because enablement
 857	 * can also set this value, and there is no indication there of
 858	 * shared queues.
 859	 */
 860	if (shared_queue)
 861		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
 862
 863	spin_lock_bh(&mvmsta->lock);
 864	/*
 865	 * This looks racy, but it is not. We have only one packet for
 866	 * this ra/tid in our Tx path since we stop the Qdisc when we
 867	 * need to allocate a new TFD queue.
 868	 */
 869	if (inc_ssn)
 870		mvmsta->tid_data[tid].seq_number += 0x10;
 871	mvmsta->tid_data[tid].txq_id = queue;
 872	mvmsta->tid_data[tid].is_tid_active = true;
 873	mvmsta->tfd_queue_msk |= BIT(queue);
 874	queue_state = mvmsta->tid_data[tid].state;
 875
 876	if (mvmsta->reserved_queue == queue)
 877		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
 878	spin_unlock_bh(&mvmsta->lock);
 879
 880	if (!shared_queue) {
 881		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
 882		if (ret)
 883			goto out_err;
 884
 885		/* If we need to re-enable aggregations... */
 886		if (queue_state == IWL_AGG_ON) {
 887			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
 888			if (ret)
 889				goto out_err;
 890		}
 891	} else {
 892		/* Redirect queue, if needed */
 893		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
 894						 wdg_timeout, false);
 895		if (ret)
 896			goto out_err;
 897	}
 898
 899	return 0;
 900
 901out_err:
 902	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
 903
 904	return ret;
 905}
 906
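/*
 * Hand ownership of a shared queue to one of the TIDs still mapped to
 * it, updating both the SCD configuration in the firmware and the
 * local txq_tid bookkeeping.
 */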
 907static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
 908{
 909	struct iwl_scd_txq_cfg_cmd cmd = {
 910		.scd_queue = queue,
 911		.action = SCD_CFG_UPDATE_QUEUE_TID,
 912	};
 913	int tid;
 914	unsigned long tid_bitmap;
 915	int ret;
 916
 917	lockdep_assert_held(&mvm->mutex);
 918
 919	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 920		return;
 921
 922	spin_lock_bh(&mvm->queue_info_lock);
 923	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 924	spin_unlock_bh(&mvm->queue_info_lock);
 925
 926	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
 927		return;
 928
 929	/* Find any TID for queue */
 930	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
 931	cmd.tid = tid;
 932	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 933
 934	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
 935	if (ret) {
 936		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
 937			queue, ret);
 938		return;
 939	}
 940
 941	spin_lock_bh(&mvm->queue_info_lock);
 942	mvm->queue_info[queue].txq_tid = tid;
 943	spin_unlock_bh(&mvm->queue_info_lock);
 944	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
 945			    queue, tid);
 946}
 947
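/*
 * Turn a previously shared queue back into a single-TID queue:
 * redirect it to the remaining TID's AC and, if that TID had an
 * aggregation session, re-enable aggregation on it.
 */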
 948static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
 949{
 950	struct ieee80211_sta *sta;
 951	struct iwl_mvm_sta *mvmsta;
 952	u8 sta_id;
 953	int tid = -1;
 954	unsigned long tid_bitmap;
 955	unsigned int wdg_timeout;
 956	int ssn;
 957	int ret = true;
 958
 959	/* queue sharing is disabled on new TX path */
 960	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 961		return;
 962
 963	lockdep_assert_held(&mvm->mutex);
 964
 965	spin_lock_bh(&mvm->queue_info_lock);
 966	sta_id = mvm->queue_info[queue].ra_sta_id;
 967	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
 968	spin_unlock_bh(&mvm->queue_info_lock);
 969
 970	/* Find TID for queue, and make sure it is the only one on the queue */
 971	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
 972	if (tid_bitmap != BIT(tid)) {
 973		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
 974			queue, tid_bitmap);
 975		return;
 976	}
 977
 978	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
 979			    tid);
 980
 981	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 982					lockdep_is_held(&mvm->mutex));
 983
 984	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
 985		return;
 986
 987	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 988	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
 989
 990	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
 991
 992	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
 993					 tid_to_mac80211_ac[tid], ssn,
 994					 wdg_timeout, true);
 995	if (ret) {
 996		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
 997		return;
 998	}
 999
1000	/* If aggs should be turned back on - do it */
1001	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1002		struct iwl_mvm_add_sta_cmd cmd = {0};
1003
1004		mvmsta->tid_disable_agg &= ~BIT(tid);
1005
1006		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1007		cmd.sta_id = mvmsta->sta_id;
1008		cmd.add_modify = STA_MODE_MODIFY;
1009		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1010		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1011		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1012
1013		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1014					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1015		if (!ret) {
1016			IWL_DEBUG_TX_QUEUES(mvm,
1017					    "TXQ #%d is now aggregated again\n",
1018					    queue);
1019
1020			/* Mark queue internally as aggregating again */
1021			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1022		}
1023	}
1024
1025	spin_lock_bh(&mvm->queue_info_lock);
1026	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1027	spin_unlock_bh(&mvm->queue_info_lock);
1028}
1029
1030static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1031{
1032	if (tid == IWL_MAX_TID_COUNT)
1033		return IEEE80211_AC_VO; /* MGMT */
1034
1035	return tid_to_mac80211_ac[tid];
1036}
1037
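/*
 * Flush out frames that were deferred for a station/TID while no queue
 * was allocated: allocate a queue if needed, then TX (or free) all
 * pending frames and wake the corresponding mac80211 queue.
 */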
1038static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
1039				       struct ieee80211_sta *sta, int tid)
1040{
1041	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1042	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1043	struct sk_buff *skb;
1044	struct ieee80211_hdr *hdr;
1045	struct sk_buff_head deferred_tx;
1046	u8 mac_queue;
1047	bool no_queue = false; /* Marks if there is a problem with the queue */
1048	u8 ac;
1049
1050	lockdep_assert_held(&mvm->mutex);
1051
1052	skb = skb_peek(&tid_data->deferred_tx_frames);
1053	if (!skb)
1054		return;
1055	hdr = (void *)skb->data;
1056
1057	ac = iwl_mvm_tid_to_ac_queue(tid);
1058	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
1059
1060	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
1061	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
1062		IWL_ERR(mvm,
1063			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
1064			mvmsta->sta_id, tid);
1065
1066		/*
1067		 * Mark queue as problematic so later the deferred traffic is
1068		 * freed, as we can do nothing with it
1069		 */
1070		no_queue = true;
1071	}
1072
1073	__skb_queue_head_init(&deferred_tx);
1074
1075	/* Disable bottom-halves when entering TX path */
1076	local_bh_disable();
1077	spin_lock(&mvmsta->lock);
1078	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
1079	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
1080	spin_unlock(&mvmsta->lock);
1081
1082	while ((skb = __skb_dequeue(&deferred_tx)))
1083		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
1084			ieee80211_free_txskb(mvm->hw, skb);
1085	local_bh_enable();
1086
1087	/* Wake queue */
1088	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
1089}
1090
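/*
 * Deferred-stream worker: reconfigure shared queues that need it
 * (unsharing or ownership change) and then allocate queues for any
 * stations that accumulated deferred traffic.
 */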
1091void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1092{
1093	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1094					   add_stream_wk);
1095	struct ieee80211_sta *sta;
1096	struct iwl_mvm_sta *mvmsta;
1097	unsigned long deferred_tid_traffic;
1098	int queue, sta_id, tid;
1099
1100	/* Check inactivity of queues */
1101	iwl_mvm_inactivity_check(mvm);
1102
1103	mutex_lock(&mvm->mutex);
1104
1105	/* No queue reconfiguration in TVQM mode */
1106	if (iwl_mvm_has_new_tx_api(mvm))
1107		goto alloc_queues;
1108
1109	/* Reconfigure queues requiring reconfiguration */
1110	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
1111		bool reconfig;
1112		bool change_owner;
1113
1114		spin_lock_bh(&mvm->queue_info_lock);
1115		reconfig = (mvm->queue_info[queue].status ==
1116			    IWL_MVM_QUEUE_RECONFIGURING);
1117
1118		/*
1119		 * We need to take into account a situation in which a TXQ was
1120		 * allocated to TID x, and then turned shared by adding TIDs y
1121		 * and z. If TID x becomes inactive and is removed from the TXQ,
1122		 * ownership must be given to one of the remaining TIDs.
1123		 * This is mainly because if TID x continues - a new queue can't
1124		 * be allocated for it as long as it is an owner of another TXQ.
1125		 */
1126		change_owner = !(mvm->queue_info[queue].tid_bitmap &
1127				 BIT(mvm->queue_info[queue].txq_tid)) &&
1128			       (mvm->queue_info[queue].status ==
1129				IWL_MVM_QUEUE_SHARED);
1130		spin_unlock_bh(&mvm->queue_info_lock);
1131
1132		if (reconfig)
1133			iwl_mvm_unshare_queue(mvm, queue);
1134		else if (change_owner)
1135			iwl_mvm_change_queue_owner(mvm, queue);
1136	}
1137
1138alloc_queues:
1139	/* Go over all stations with deferred traffic */
1140	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1141			 IWL_MVM_STATION_COUNT) {
1142		clear_bit(sta_id, mvm->sta_deferred_frames);
1143		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1144						lockdep_is_held(&mvm->mutex));
1145		if (IS_ERR_OR_NULL(sta))
1146			continue;
1147
1148		mvmsta = iwl_mvm_sta_from_mac80211(sta);
1149		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
1150
1151		for_each_set_bit(tid, &deferred_tid_traffic,
1152				 IWL_MAX_TID_COUNT + 1)
1153			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1154	}
1155
1156	mutex_unlock(&mvm->mutex);
1157}
1158
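/*
 * Reserve a queue for a new station on the DQA path, preferring the
 * dedicated BSS client queue for a non-TDLS station interface. The
 * queue is only marked reserved here; it is enabled on first use.
 */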
1159static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1160				      struct ieee80211_sta *sta,
1161				      enum nl80211_iftype vif_type)
1162{
1163	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1164	int queue;
1165	bool using_inactive_queue = false, same_sta = false;
1166
1167	/* queue reserving is disabled on new TX path */
1168	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1169		return 0;
1170
1171	/*
1172	 * Check for inactive queues, so we don't reach a situation where we
1173	 * can't add a STA due to a shortage in queues that doesn't really exist
1174	 */
1175	iwl_mvm_inactivity_check(mvm);
1176
1177	spin_lock_bh(&mvm->queue_info_lock);
1178
1179	/* Make sure we have free resources for this STA */
1180	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1181	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
1182	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1183	     IWL_MVM_QUEUE_FREE))
1184		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1185	else
1186		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1187						IWL_MVM_DQA_MIN_DATA_QUEUE,
1188						IWL_MVM_DQA_MAX_DATA_QUEUE);
1189	if (queue < 0) {
1190		spin_unlock_bh(&mvm->queue_info_lock);
1191		IWL_ERR(mvm, "No available queues for new station\n");
1192		return -ENOSPC;
1193	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
1194		/*
1195		 * If this queue is already allocated but inactive we'll need to
1196		 * first free this queue before enabling it again, we'll mark
1197		 * it as reserved to make sure no new traffic arrives on it
1198		 */
1199		using_inactive_queue = true;
1200		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
1201	}
1202	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1203
1204	spin_unlock_bh(&mvm->queue_info_lock);
1205
1206	mvmsta->reserved_queue = queue;
1207
1208	if (using_inactive_queue)
1209		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
1210
1211	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1212			    queue, mvmsta->sta_id);
1213
1214	return 0;
1215}
1216
1217/*
1218 * In DQA mode, after a HW restart the queues should be allocated as before, in
1219 * order to avoid race conditions when there are shared queues. This function
1220 * does the re-mapping and queue allocation.
1221 *
1222 * Note that re-enabling aggregations isn't done in this function.
1223 */
1224static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1225						 struct iwl_mvm_sta *mvm_sta)
1226{
1227	unsigned int wdg_timeout =
1228			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1229	int i;
1230	struct iwl_trans_txq_scd_cfg cfg = {
1231		.sta_id = mvm_sta->sta_id,
1232		.frame_limit = IWL_FRAME_LIMIT,
1233	};
1234
1235	/* Make sure reserved queue is still marked as such (if allocated) */
1236	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1237		mvm->queue_info[mvm_sta->reserved_queue].status =
1238			IWL_MVM_QUEUE_RESERVED;
1239
1240	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1241		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1242		int txq_id = tid_data->txq_id;
1243		int ac;
1244		u8 mac_queue;
1245
1246		if (txq_id == IWL_MVM_INVALID_QUEUE)
1247			continue;
1248
1249		skb_queue_head_init(&tid_data->deferred_tx_frames);
1250
1251		ac = tid_to_mac80211_ac[i];
1252		mac_queue = mvm_sta->vif->hw_queue[ac];
1253
1254		if (iwl_mvm_has_new_tx_api(mvm)) {
1255			IWL_DEBUG_TX_QUEUES(mvm,
1256					    "Re-mapping sta %d tid %d\n",
1257					    mvm_sta->sta_id, i);
1258			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1259							 mvm_sta->sta_id,
1260							 i, wdg_timeout);
1261			tid_data->txq_id = txq_id;
1262
1263			/*
1264			 * Since we don't set the seq number after reset, and HW
1265			 * sets it now, FW reset will cause the seq num to start
1266			 * at 0 again, so the driver must reset it internally as
1267			 * well to stay in sync with the real value.
1268			 */
1269			tid_data->seq_number = 0;
1270		} else {
1271			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1272
1273			cfg.tid = i;
1274			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1275			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1276					 txq_id ==
1277					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1278
1279			IWL_DEBUG_TX_QUEUES(mvm,
1280					    "Re-mapping sta %d tid %d to queue %d\n",
1281					    mvm_sta->sta_id, i, txq_id);
1282
1283			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1284					   wdg_timeout);
1285			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1286		}
1287	}
1288}
1289
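/*
 * Send ADD_STA to the firmware for an internal station (aux, sniffer,
 * broadcast, multicast) and check the status in the response.
 */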
1290static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1291				      struct iwl_mvm_int_sta *sta,
1292				      const u8 *addr,
1293				      u16 mac_id, u16 color)
1294{
1295	struct iwl_mvm_add_sta_cmd cmd;
1296	int ret;
1297	u32 status = ADD_STA_SUCCESS;
1298
1299	lockdep_assert_held(&mvm->mutex);
1300
1301	memset(&cmd, 0, sizeof(cmd));
1302	cmd.sta_id = sta->sta_id;
1303	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1304							     color));
1305	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1306		cmd.station_type = sta->type;
1307
1308	if (!iwl_mvm_has_new_tx_api(mvm))
1309		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1310	cmd.tid_disable_tx = cpu_to_le16(0xffff);
1311
1312	if (addr)
1313		memcpy(cmd.addr, addr, ETH_ALEN);
1314
1315	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1316					  iwl_mvm_add_sta_cmd_size(mvm),
1317					  &cmd, &status);
1318	if (ret)
1319		return ret;
1320
1321	switch (status & IWL_ADD_STA_STATUS_MASK) {
1322	case ADD_STA_SUCCESS:
1323		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1324		return 0;
1325	default:
1326		ret = -EIO;
1327		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1328			status);
1329		break;
1330	}
1331	return ret;
1332}
1333
1334int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1335		    struct ieee80211_vif *vif,
1336		    struct ieee80211_sta *sta)
1337{
1338	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1339	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1340	struct iwl_mvm_rxq_dup_data *dup_data;
1341	int i, ret, sta_id;
1342	bool sta_update = false;
1343	unsigned int sta_flags = 0;
1344
1345	lockdep_assert_held(&mvm->mutex);
1346
1347	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1348		sta_id = iwl_mvm_find_free_sta_id(mvm,
1349						  ieee80211_vif_type_p2p(vif));
1350	else
1351		sta_id = mvm_sta->sta_id;
1352
1353	if (sta_id == IWL_MVM_INVALID_STA)
1354		return -ENOSPC;
1355
1356	spin_lock_init(&mvm_sta->lock);
1357
1358	/* if this is a HW restart re-alloc existing queues */
1359	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1360		struct iwl_mvm_int_sta tmp_sta = {
1361			.sta_id = sta_id,
1362			.type = mvm_sta->sta_type,
1363		};
1364
1365		/*
1366		 * First add an empty station since allocating
1367		 * a queue requires a valid station
1368		 */
1369		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1370						 mvmvif->id, mvmvif->color);
1371		if (ret)
1372			goto err;
1373
1374		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
1375		sta_update = true;
1376		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1377		goto update_fw;
1378	}
1379
1380	mvm_sta->sta_id = sta_id;
1381	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1382						      mvmvif->color);
1383	mvm_sta->vif = vif;
1384	if (!mvm->trans->cfg->gen2)
1385		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1386	else
1387		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1388	mvm_sta->tx_protection = 0;
1389	mvm_sta->tt_tx_protection = false;
1390	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1391
1392	/* HW restart, don't assume the memory has been zeroed */
1393	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1394	mvm_sta->tfd_queue_msk = 0;
1395
1396	/* for HW restart - reset everything but the sequence number */
1397	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1398		u16 seq = mvm_sta->tid_data[i].seq_number;
1399		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1400		mvm_sta->tid_data[i].seq_number = seq;
1401
1402		/*
1403		 * Mark all queues for this STA as unallocated and defer TX
1404		 * frames until the queue is allocated
1405		 */
1406		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1407		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
1408	}
1409	mvm_sta->deferred_traffic_tid_map = 0;
1410	mvm_sta->agg_tids = 0;
1411
1412	if (iwl_mvm_has_new_rx_api(mvm) &&
1413	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1414		int q;
1415
1416		dup_data = kcalloc(mvm->trans->num_rx_queues,
1417				   sizeof(*dup_data), GFP_KERNEL);
1418		if (!dup_data)
1419			return -ENOMEM;
1420		/*
1421		 * Initialize all the last_seq values to 0xffff which can never
1422		 * compare equal to the frame's seq_ctrl in the check in
1423		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1424		 * number and fragmented packets don't reach that function.
1425		 *
1426		 * This thus allows receiving a packet with seqno 0 and the
1427		 * retry bit set as the very first packet on a new TID.
1428		 */
1429		for (q = 0; q < mvm->trans->num_rx_queues; q++)
1430			memset(dup_data[q].last_seq, 0xff,
1431			       sizeof(dup_data[q].last_seq));
1432		mvm_sta->dup_data = dup_data;
1433	}
1434
1435	if (!iwl_mvm_has_new_tx_api(mvm)) {
1436		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1437						 ieee80211_vif_type_p2p(vif));
1438		if (ret)
1439			goto err;
1440	}
1441
1442	/*
1443	 * if rs is registered with mac80211, then "add station" will be handled
1444	 * via the corresponding ops, otherwise need to notify rate scaling here
1445	 */
1446	if (iwl_mvm_has_tlc_offload(mvm))
1447		iwl_mvm_rs_add_sta(mvm, mvm_sta);
1448
1449update_fw:
1450	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1451	if (ret)
1452		goto err;
1453
1454	if (vif->type == NL80211_IFTYPE_STATION) {
1455		if (!sta->tdls) {
1456			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1457			mvmvif->ap_sta_id = sta_id;
1458		} else {
1459			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1460		}
1461	}
1462
1463	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1464
1465	return 0;
1466
1467err:
1468	return ret;
1469}
1470
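/*
 * Ask the firmware to start or stop draining frames for this station,
 * via the DRAIN_FLOW station flag of the ADD_STA command.
 */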
1471int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1472		      bool drain)
1473{
1474	struct iwl_mvm_add_sta_cmd cmd = {};
1475	int ret;
1476	u32 status;
1477
1478	lockdep_assert_held(&mvm->mutex);
1479
1480	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1481	cmd.sta_id = mvmsta->sta_id;
1482	cmd.add_modify = STA_MODE_MODIFY;
1483	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1484	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1485
1486	status = ADD_STA_SUCCESS;
1487	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1488					  iwl_mvm_add_sta_cmd_size(mvm),
1489					  &cmd, &status);
1490	if (ret)
1491		return ret;
1492
1493	switch (status & IWL_ADD_STA_STATUS_MASK) {
1494	case ADD_STA_SUCCESS:
1495		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1496			       mvmsta->sta_id);
1497		break;
1498	default:
1499		ret = -EIO;
1500		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1501			mvmsta->sta_id);
1502		break;
1503	}
1504
1505	return ret;
1506}
1507
1508/*
1509 * Remove a station from the FW table. Before sending the command to remove
1510  * the station, validate that the station is indeed known to the driver
1511  * (sanity check only).
1512 */
1513static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1514{
1515	struct ieee80211_sta *sta;
1516	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1517		.sta_id = sta_id,
1518	};
1519	int ret;
1520
1521	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1522					lockdep_is_held(&mvm->mutex));
1523
1524	/* Note: internal stations are marked as error values */
1525	if (!sta) {
1526		IWL_ERR(mvm, "Invalid station id\n");
1527		return -EINVAL;
1528	}
1529
1530	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1531				   sizeof(rm_sta_cmd), &rm_sta_cmd);
1532	if (ret) {
1533		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1534		return ret;
1535	}
1536
1537	return 0;
1538}
1539
1540static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1541				       struct ieee80211_vif *vif,
1542				       struct iwl_mvm_sta *mvm_sta)
1543{
1544	int ac;
1545	int i;
1546
1547	lockdep_assert_held(&mvm->mutex);
1548
1549	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1550		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1551			continue;
1552
1553		ac = iwl_mvm_tid_to_ac_queue(i);
1554		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1555				    vif->hw_queue[ac], i, 0);
1556		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1557	}
1558}
1559
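/*
 * Wait until every TXQ still assigned to one of the station's TIDs
 * has been emptied by the transport.
 */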
1560int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1561				  struct iwl_mvm_sta *mvm_sta)
1562{
1563	int i;
1564
1565	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1566		u16 txq_id;
1567		int ret;
1568
1569		spin_lock_bh(&mvm_sta->lock);
1570		txq_id = mvm_sta->tid_data[i].txq_id;
1571		spin_unlock_bh(&mvm_sta->lock);
1572
1573		if (txq_id == IWL_MVM_INVALID_QUEUE)
1574			continue;
1575
1576		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1577		if (ret)
1578			return ret;
1579	}
1580
1581	return 0;
1582}
1583
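/*
 * Remove a station: drain and flush its frames, wait for its queues to
 * empty, free its TXQs (including a still-reserved one), then remove
 * it from the firmware and from the fw_id_to_mac_id map.
 */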
1584int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1585		   struct ieee80211_vif *vif,
1586		   struct ieee80211_sta *sta)
1587{
1588	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1589	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1590	u8 sta_id = mvm_sta->sta_id;
1591	int ret;
1592
1593	lockdep_assert_held(&mvm->mutex);
1594
1595	if (iwl_mvm_has_new_rx_api(mvm))
1596		kfree(mvm_sta->dup_data);
1597
1598	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1599	if (ret)
1600		return ret;
1601
1602	/* flush its queues here since we are freeing mvm_sta */
1603	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1604	if (ret)
1605		return ret;
1606	if (iwl_mvm_has_new_tx_api(mvm)) {
1607		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1608	} else {
1609		u32 q_mask = mvm_sta->tfd_queue_msk;
1610
1611		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1612						     q_mask);
1613	}
1614	if (ret)
1615		return ret;
1616
1617	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1618
1619	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1620
1621	/* If there is a TXQ still marked as reserved - free it */
1622	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1623		u8 reserved_txq = mvm_sta->reserved_queue;
1624		enum iwl_mvm_queue_status *status;
1625
1626		/*
1627		 * If no traffic has gone through the reserved TXQ - it
1628		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1629		 * should be manually marked as free again
1630		 */
1631		spin_lock_bh(&mvm->queue_info_lock);
1632		status = &mvm->queue_info[reserved_txq].status;
1633		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1634			 (*status != IWL_MVM_QUEUE_FREE),
1635			 "sta_id %d reserved txq %d status %d",
1636			 sta_id, reserved_txq, *status)) {
1637			spin_unlock_bh(&mvm->queue_info_lock);
1638			return -EINVAL;
1639		}
1640
1641		*status = IWL_MVM_QUEUE_FREE;
1642		spin_unlock_bh(&mvm->queue_info_lock);
1643	}
1644
1645	if (vif->type == NL80211_IFTYPE_STATION &&
1646	    mvmvif->ap_sta_id == sta_id) {
1647		/* if associated - we can't remove the AP STA now */
1648		if (vif->bss_conf.assoc)
1649			return ret;
1650
1651		/* unassoc - go ahead - remove the AP STA now */
1652		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1653
1654		/* clear d0i3_ap_sta_id if no longer relevant */
1655		if (mvm->d0i3_ap_sta_id == sta_id)
1656			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1657	}
1658
1659	/*
1660	 * This shouldn't happen - the TDLS channel switch should be canceled
1661	 * before the STA is removed.
1662	 */
1663	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1664		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1665		cancel_delayed_work(&mvm->tdls_cs.dwork);
1666	}
1667
1668	/*
1669	 * Make sure that the tx response code sees the station as -EBUSY and
1670	 * calls the drain worker.
1671	 */
1672	spin_lock_bh(&mvm_sta->lock);
1673	spin_unlock_bh(&mvm_sta->lock);
1674
1675	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1676	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1677
1678	return ret;
1679}
1680
1681int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1682		      struct ieee80211_vif *vif,
1683		      u8 sta_id)
1684{
1685	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1686
1687	lockdep_assert_held(&mvm->mutex);
1688
1689	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1690	return ret;
1691}
1692
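/*
 * Allocate a station id for an internal station and publish a non-NULL
 * (error) pointer in fw_id_to_mac_id so iterating over the stations
 * won't stop at this slot.
 */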
1693int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1694			     struct iwl_mvm_int_sta *sta,
1695			     u32 qmask, enum nl80211_iftype iftype,
1696			     enum iwl_sta_type type)
1697{
1698	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1699	    sta->sta_id == IWL_MVM_INVALID_STA) {
1700		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
1701		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
1702			return -ENOSPC;
1703	}
1704
1705	sta->tfd_queue_msk = qmask;
1706	sta->type = type;
1707
1708	/* put a non-NULL value so iterating over the stations won't stop */
1709	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1710	return 0;
1711}
1712
1713void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1714{
1715	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1716	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1717	sta->sta_id = IWL_MVM_INVALID_STA;
1718}
1719
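/*
 * Enable the aux/sniffer queue: on the new TX API ask the TVQM for a
 * queue (returned through *queue), otherwise configure the fixed
 * queue via the SCD.
 */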
1720static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
1721					  u8 sta_id, u8 fifo)
1722{
1723	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1724					mvm->cfg->base_params->wd_timeout :
1725					IWL_WATCHDOG_DISABLED;
1726
1727	if (iwl_mvm_has_new_tx_api(mvm)) {
1728		int tvqm_queue =
1729			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
1730						IWL_MAX_TID_COUNT,
1731						wdg_timeout);
1732		*queue = tvqm_queue;
1733	} else {
1734		struct iwl_trans_txq_scd_cfg cfg = {
1735			.fifo = fifo,
1736			.sta_id = sta_id,
1737			.tid = IWL_MAX_TID_COUNT,
1738			.aggregate = false,
1739			.frame_limit = IWL_FRAME_LIMIT,
1740		};
1741
1742		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
1743	}
1744}
1745
1746int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1747{
1748	int ret;
1749
1750	lockdep_assert_held(&mvm->mutex);
1751
1752	/* Allocate aux station and assign to it the aux queue */
1753	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
1754				       NL80211_IFTYPE_UNSPECIFIED,
1755				       IWL_STA_AUX_ACTIVITY);
1756	if (ret)
1757		return ret;
1758
1759	/* Map Aux queue to fifo - needs to happen before adding Aux station */
1760	if (!iwl_mvm_has_new_tx_api(mvm))
1761		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1762					      mvm->aux_sta.sta_id,
1763					      IWL_MVM_TX_FIFO_MCAST);
1764
1765	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1766					 MAC_INDEX_AUX, 0);
1767	if (ret) {
1768		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1769		return ret;
1770	}
1771
1772	/*
1773	 * For 22000 firmware and on we cannot add queue to a station unknown
1774	 * to firmware so enable queue here - after the station was added
1775	 */
1776	if (iwl_mvm_has_new_tx_api(mvm))
1777		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1778					      mvm->aux_sta.sta_id,
1779					      IWL_MVM_TX_FIFO_MCAST);
1780
1781	return 0;
1782}
1783
1784int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1785{
1786	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1787	int ret;
1788
1789	lockdep_assert_held(&mvm->mutex);
1790
1791	/* Map snif queue to fifo - must happen before adding snif station */
1792	if (!iwl_mvm_has_new_tx_api(mvm))
1793		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1794					      mvm->snif_sta.sta_id,
1795					      IWL_MVM_TX_FIFO_BE);
1796
1797	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1798					 mvmvif->id, 0);
1799	if (ret)
1800		return ret;
1801
1802	/*
1803	 * For 22000 firmware and on we cannot add queue to a station unknown
1804	 * to firmware so enable queue here - after the station was added
1805	 */
1806	if (iwl_mvm_has_new_tx_api(mvm))
1807		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1808					      mvm->snif_sta.sta_id,
1809					      IWL_MVM_TX_FIFO_BE);
1810
1811	return 0;
1812}
1813
1814int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1815{
1816	int ret;
1817
1818	lockdep_assert_held(&mvm->mutex);
1819
1820	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
1821			    IWL_MAX_TID_COUNT, 0);
1822	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1823	if (ret)
1824		IWL_WARN(mvm, "Failed sending remove station\n");
1825
1826	return ret;
1827}
1828
1829void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1830{
1831	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1832}
1833
1834void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1835{
1836	lockdep_assert_held(&mvm->mutex);
1837
1838	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1839}
1840
1841/*
1842 * Send the add station command for the vif's broadcast station.
1843 * Assumes that the station was already allocated.
1844 *
1845 * @mvm: the mvm component
1846 * @vif: the interface to which the broadcast station is added
1848 */
1849int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1850{
1851	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1852	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1853	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1854	const u8 *baddr = _baddr;
1855	int queue;
1856	int ret;
1857	unsigned int wdg_timeout =
1858		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1859	struct iwl_trans_txq_scd_cfg cfg = {
1860		.fifo = IWL_MVM_TX_FIFO_VO,
1861		.sta_id = mvmvif->bcast_sta.sta_id,
1862		.tid = IWL_MAX_TID_COUNT,
1863		.aggregate = false,
1864		.frame_limit = IWL_FRAME_LIMIT,
1865	};
1866
1867	lockdep_assert_held(&mvm->mutex);
1868
1869	if (!iwl_mvm_has_new_tx_api(mvm)) {
1870		if (vif->type == NL80211_IFTYPE_AP ||
1871		    vif->type == NL80211_IFTYPE_ADHOC)
1872			queue = mvm->probe_queue;
1873		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1874			queue = mvm->p2p_dev_queue;
1875		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
1876			return -EINVAL;
1877
1878		bsta->tfd_queue_msk |= BIT(queue);
1879
1880		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
1881				   &cfg, wdg_timeout);
1882	}
1883
1884	if (vif->type == NL80211_IFTYPE_ADHOC)
1885		baddr = vif->bss_conf.bssid;
1886
1887	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
1888		return -ENOSPC;
1889
1890	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1891					 mvmvif->id, mvmvif->color);
1892	if (ret)
1893		return ret;
1894
1895	/*
1896	 * For 22000 firmware and on we cannot add queue to a station unknown
1897	 * to firmware so enable queue here - after the station was added
1898	 */
1899	if (iwl_mvm_has_new_tx_api(mvm)) {
1900		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
1901						bsta->sta_id,
1902						IWL_MAX_TID_COUNT,
1903						wdg_timeout);
1904
1905		if (vif->type == NL80211_IFTYPE_AP ||
1906		    vif->type == NL80211_IFTYPE_ADHOC)
1907			mvm->probe_queue = queue;
1908		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1909			mvm->p2p_dev_queue = queue;
1910	}
1911
1912	return 0;
1913}
1914
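/*
 * Flush and disable the broadcast station's queue (the probe or P2P
 * device queue, depending on the interface type).
 */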
1915static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
1916					  struct ieee80211_vif *vif)
1917{
1918	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1919	int queue;
1920
1921	lockdep_assert_held(&mvm->mutex);
1922
1923	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
1924
1925	switch (vif->type) {
1926	case NL80211_IFTYPE_AP:
1927	case NL80211_IFTYPE_ADHOC:
1928		queue = mvm->probe_queue;
1929		break;
1930	case NL80211_IFTYPE_P2P_DEVICE:
1931		queue = mvm->p2p_dev_queue;
1932		break;
1933	default:
1934		WARN(1, "Can't free bcast queue on vif type %d\n",
1935		     vif->type);
1936		return;
1937	}
1938
1939	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
1940	if (iwl_mvm_has_new_tx_api(mvm))
1941		return;
1942
1943	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
1944	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
1945}
1946
1947 /* Send the FW a request to remove the station from its internal data
1948 * structures, but DO NOT remove the entry from the local data structures. */
1949int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1950{
1951	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1952	int ret;
1953
1954	lockdep_assert_held(&mvm->mutex);
1955
1956	iwl_mvm_free_bcast_sta_queues(mvm, vif);
1957
1958	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
1959	if (ret)
1960		IWL_WARN(mvm, "Failed sending remove station\n");
1961	return ret;
1962}
1963
1964int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1965{
1966	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1967
1968	lockdep_assert_held(&mvm->mutex);
1969
1970	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
1971					ieee80211_vif_type_p2p(vif),
1972					IWL_STA_GENERAL_PURPOSE);
1973}
1974
1975/* Allocate a new station entry for the broadcast station to the given vif,
1976 * and send it to the FW.
1977 * Note that each P2P mac should have its own broadcast station.
1978 *
1979 * @mvm: the mvm component
1980 * @vif: the interface to which the broadcast station is added
1981  */
1982int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1983{
1984	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1985	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1986	int ret;
1987
1988	lockdep_assert_held(&mvm->mutex);
1989
1990	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1991	if (ret)
1992		return ret;
1993
1994	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
1995
1996	if (ret)
1997		iwl_mvm_dealloc_int_sta(mvm, bsta);
1998
1999	return ret;
2000}
2001
2002void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2003{
2004	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2005
2006	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2007}
2008
2009/*
2010  * Send the FW a request to remove the station from its internal data
2011 * structures, and in addition remove it from the local data structure.
2012 */
2013int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2014{
2015	int ret;
2016
2017	lockdep_assert_held(&mvm->mutex);
2018
2019	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2020
2021	iwl_mvm_dealloc_bcast_sta(mvm, vif);
2022
2023	return ret;
2024}
2025
2026/*
2027 * Allocate a new station entry for the multicast station to the given vif,
2028 * and send it to the FW.
2029 * Note that each AP/GO mac should have its own multicast station.
2030 *
2031 * @mvm: the mvm component
2032 * @vif: the interface to which the multicast station is added
2033 */
2034int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2035{
2036	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2037	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2038	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2039	const u8 *maddr = _maddr;
2040	struct iwl_trans_txq_scd_cfg cfg = {
2041		.fifo = IWL_MVM_TX_FIFO_MCAST,
2042		.sta_id = msta->sta_id,
2043		.tid = 0,
2044		.aggregate = false,
2045		.frame_limit = IWL_FRAME_LIMIT,
2046	};
2047	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2048	int ret;
2049
2050	lockdep_assert_held(&mvm->mutex);
2051
2052	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2053		    vif->type != NL80211_IFTYPE_ADHOC))
2054		return -ENOTSUPP;
2055
2056	/*
2057	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2058	 * invalid, so make sure we use the queue we want.
2059	 * Note that this is done here as we want to avoid making DQA
2060	 * changes in mac80211 layer.
2061	 */
2062	if (vif->type == NL80211_IFTYPE_ADHOC) {
2063		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2064		mvmvif->cab_queue = vif->cab_queue;
2065	}
2066
2067	/*
2068	 * While in previous FWs we had to exclude cab queue from TFD queue
2069	 * mask, now it is needed as any other queue.
2070	 * mask, now it is needed like any other queue.
2071	if (!iwl_mvm_has_new_tx_api(mvm) &&
2072	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2073		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2074				   &cfg, timeout);
2075		msta->tfd_queue_msk |= BIT(vif->cab_queue);
2076	}
2077	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2078					 mvmvif->id, mvmvif->color);
2079	if (ret) {
2080		iwl_mvm_dealloc_int_sta(mvm, msta);
2081		return ret;
2082	}
2083
2084	/*
2085	 * Enable cab queue after the ADD_STA command is sent.
2086	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2087	 * command with unknown station id, and for FW that doesn't support
2088	 * station API since the cab queue is not included in the
2089	 * tfd_queue_mask.
2090	 */
2091	if (iwl_mvm_has_new_tx_api(mvm)) {
2092		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2093						    msta->sta_id,
2094						    0,
2095						    timeout);
2096		mvmvif->cab_queue = queue;
2097	} else if (!fw_has_api(&mvm->fw->ucode_capa,
2098			       IWL_UCODE_TLV_API_STA_TYPE))
2099		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2100				   &cfg, timeout);
2101
2102	return 0;
2103}
2104
2105/*
2106  * Send the FW a request to remove the station from its internal data
2107 * structures, and in addition remove it from the local data structure.
2108 */
2109int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2110{
2111	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2112	int ret;
2113
2114	lockdep_assert_held(&mvm->mutex);
2115
2116	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2117
2118	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
2119			    0, 0);
2120
2121	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2122	if (ret)
2123		IWL_WARN(mvm, "Failed sending remove station\n");
2124
2125	return ret;
2126}
2127
2128#define IWL_MAX_RX_BA_SESSIONS 16
2129
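/*
 * Synchronously notify all RX queues that this BAID is going away, so
 * none of them keeps using its reorder data.
 */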
2130static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2131{
2132	struct iwl_mvm_delba_notif notif = {
2133		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2134		.metadata.sync = 1,
2135		.delba.baid = baid,
2136	};
2137	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2138}
2139
2140static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2141				 struct iwl_mvm_baid_data *data)
2142{
2143	int i;
2144
2145	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2146
2147	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2148		int j;
2149		struct iwl_mvm_reorder_buffer *reorder_buf =
2150			&data->reorder_buf[i];
2151		struct iwl_mvm_reorder_buf_entry *entries =
2152			&data->entries[i * data->entries_per_queue];
2153
2154		spin_lock_bh(&reorder_buf->lock);
2155		if (likely(!reorder_buf->num_stored)) {
2156			spin_unlock_bh(&reorder_buf->lock);
2157			continue;
2158		}
2159
2160		/*
2161		 * This shouldn't happen in regular DELBA since the internal
2162		 * delBA notification should trigger a release of all frames in
2163		 * the reorder buffer.
2164		 */
2165		WARN_ON(1);
2166
2167		for (j = 0; j < reorder_buf->buf_size; j++)
2168			__skb_queue_purge(&entries[j].e.frames);
2169		/*
2170		 * Prevent timer re-arm. This prevents a very far-fetched case
2171		 * where we timed out on the notification. There may be prior
2172		 * RX frames pending in the RX queue before the notification
2173		 * that might get processed between now and the actual deletion
2174		 * and we would re-arm the timer although we are deleting the
2175		 * reorder buffer.
2176		 */
2177		reorder_buf->removed = true;
2178		spin_unlock_bh(&reorder_buf->lock);
2179		del_timer_sync(&reorder_buf->reorder_timer);
2180	}
2181}
2182
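/*
 * Initialize the per-RX-queue reorder buffers of a new BA session:
 * head sequence number, buffer size, expiry timer and frame lists.
 */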
2183static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2184					struct iwl_mvm_baid_data *data,
2185					u16 ssn, u8 buf_size)
2186{
2187	int i;
2188
2189	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2190		struct iwl_mvm_reorder_buffer *reorder_buf =
2191			&data->reorder_buf[i];
2192		struct iwl_mvm_reorder_buf_entry *entries =
2193			&data->entries[i * data->entries_per_queue];
2194		int j;
2195
2196		reorder_buf->num_stored = 0;
2197		reorder_buf->head_sn = ssn;
2198		reorder_buf->buf_size = buf_size;
2199		/* rx reorder timer */
2200		timer_setup(&reorder_buf->reorder_timer,
2201			    iwl_mvm_reorder_timer_expired, 0);
2202		spin_lock_init(&reorder_buf->lock);
2203		reorder_buf->mvm = mvm;
2204		reorder_buf->queue = i;
2205		reorder_buf->valid = false;
2206		for (j = 0; j < reorder_buf->buf_size; j++)
2207			__skb_queue_head_init(&entries[j].e.frames);
2208	}
2209}
2210
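/*
 * Start or stop an RX BA session for a station/TID. On start with the
 * new RX API this first allocates the BAID data, with the reorder
 * buffers sized and cache-line aligned per RX queue, before telling
 * the firmware.
 */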
2211int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2212		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
2213{
2214	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2215	struct iwl_mvm_add_sta_cmd cmd = {};
2216	struct iwl_mvm_baid_data *baid_data = NULL;
2217	int ret;
2218	u32 status;
2219
2220	lockdep_assert_held(&mvm->mutex);
2221
2222	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2223		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2224		return -ENOSPC;
2225	}
2226
2227	if (iwl_mvm_has_new_rx_api(mvm) && start) {
2228		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2229
2230		/* sparse doesn't like the __align() so don't check */
2231#ifndef __CHECKER__
2232		/*
2233		 * The division below will be OK if either the cache line size
2234		 * can be divided by the entry size (ALIGN will round up) or if
2235		 * the entry size can be divided by the cache line size, in
2236		 * which case the ALIGN() will do nothing.
2237		 */
2238		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2239			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2240#endif
2241
2242		/*
2243		 * Upward align the reorder buffer size to fill an entire cache
2244		 * line for each queue, to avoid sharing cache lines between
2245		 * different queues.
2246		 */
2247		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2248
2249		/*
2250		 * Allocate here so if allocation fails we can bail out early
2251		 * before starting the BA session in the firmware
2252		 */
2253		baid_data = kzalloc(sizeof(*baid_data) +
2254				    mvm->trans->num_rx_queues *
2255				    reorder_buf_size,
2256				    GFP_KERNEL);
2257		if (!baid_data)
2258			return -ENOMEM;
2259
2260		/*
2261		 * This division is why we need the above BUILD_BUG_ON(),
2262		 * if that doesn't hold then this will not be right.
2263		 */
2264		baid_data->entries_per_queue =
2265			reorder_buf_size / sizeof(baid_data->entries[0]);
2266	}
2267
2268	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2269	cmd.sta_id = mvm_sta->sta_id;
2270	cmd.add_modify = STA_MODE_MODIFY;
2271	if (start) {
2272		cmd.add_immediate_ba_tid = (u8) tid;
2273		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2274		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
2275	} else {
2276		cmd.remove_immediate_ba_tid = (u8) tid;
2277	}
2278	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2279				  STA_MODIFY_REMOVE_BA_TID;
2280
2281	status = ADD_STA_SUCCESS;
2282	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2283					  iwl_mvm_add_sta_cmd_size(mvm),
2284					  &cmd, &status);
2285	if (ret)
2286		goto out_free;
2287
2288	switch (status & IWL_ADD_STA_STATUS_MASK) {
2289	case ADD_STA_SUCCESS:
2290		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2291			     start ? "start" : "stopp");
2292		break;
2293	case ADD_STA_IMMEDIATE_BA_FAILURE:
2294		IWL_WARN(mvm, "RX BA Session refused by fw\n");
2295		ret = -ENOSPC;
2296		break;
2297	default:
2298		ret = -EIO;
2299		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2300			start ? "start" : "stopp", status);
2301		break;
2302	}
2303
2304	if (ret)
2305		goto out_free;
2306
2307	if (start) {
2308		u8 baid;
2309
2310		mvm->rx_ba_sessions++;
2311
2312		if (!iwl_mvm_has_new_rx_api(mvm))
2313			return 0;
2314
2315		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2316			ret = -EINVAL;
2317			goto out_free;
2318		}
2319		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2320			    IWL_ADD_STA_BAID_SHIFT);
2321		baid_data->baid = baid;
2322		baid_data->timeout = timeout;
2323		baid_data->last_rx = jiffies;
2324		baid_data->rcu_ptr = &mvm->baid_map[baid];
2325		timer_setup(&baid_data->session_timer,
2326			    iwl_mvm_rx_agg_session_expired, 0);
2327		baid_data->mvm = mvm;
2328		baid_data->tid = tid;
2329		baid_data->sta_id = mvm_sta->sta_id;
2330
2331		mvm_sta->tid_to_baid[tid] = baid;
2332		if (timeout)
2333			mod_timer(&baid_data->session_timer,
2334				  TU_TO_EXP_TIME(timeout * 2));
2335
2336		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2337		/*
2338		 * protect the BA data with RCU to cover a case where our
2339		 * internal RX sync mechanism will timeout (not that it's
2340		 * supposed to happen) and we will free the session data while
2341		 * RX is being processed in parallel
2342		 */
2343		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2344			     mvm_sta->sta_id, tid, baid);
2345		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2346		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2347	} else  {
2348		u8 baid = mvm_sta->tid_to_baid[tid];
2349
2350		if (mvm->rx_ba_sessions > 0)
2351			/* check that restart flow didn't zero the counter */
2352			mvm->rx_ba_sessions--;
2353		if (!iwl_mvm_has_new_rx_api(mvm))
2354			return 0;
2355
2356		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2357			return -EINVAL;
2358
2359		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2360		if (WARN_ON(!baid_data))
2361			return -EINVAL;
2362
2363		/* synchronize all rx queues so we can safely delete */
2364		iwl_mvm_free_reorder(mvm, baid_data);
2365		del_timer_sync(&baid_data->session_timer);
2366		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2367		kfree_rcu(baid_data, rcu_head);
2368		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2369	}
2370	return 0;
2371
2372out_free:
2373	kfree(baid_data);
2374	return ret;
2375}
2376
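/*
 * Enable or disable TX aggregation for a station/TID by updating the
 * per-TID disable mask (and the TFD queue mask) through ADD_STA.
 */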
2377int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2378		       int tid, u8 queue, bool start)
2379{
2380	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2381	struct iwl_mvm_add_sta_cmd cmd = {};
2382	int ret;
2383	u32 status;
2384
2385	lockdep_assert_held(&mvm->mutex);
2386
2387	if (start) {
2388		mvm_sta->tfd_queue_msk |= BIT(queue);
2389		mvm_sta->tid_disable_agg &= ~BIT(tid);
2390	} else {
2391		/* In DQA-mode the queue isn't removed on agg termination */
2392		mvm_sta->tid_disable_agg |= BIT(tid);
2393	}
2394
2395	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2396	cmd.sta_id = mvm_sta->sta_id;
2397	cmd.add_modify = STA_MODE_MODIFY;
2398	if (!iwl_mvm_has_new_tx_api(mvm))
2399		cmd.modify_mask = STA_MODIFY_QUEUES;
2400	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2401	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2402	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2403
2404	status = ADD_STA_SUCCESS;
2405	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2406					  iwl_mvm_add_sta_cmd_size(mvm),
2407					  &cmd, &status);
2408	if (ret)
2409		return ret;
2410
2411	switch (status & IWL_ADD_STA_STATUS_MASK) {
2412	case ADD_STA_SUCCESS:
2413		break;
2414	default:
2415		ret = -EIO;
2416		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2417			start ? "start" : "stopp", status);
2418		break;
2419	}
2420
2421	return ret;
2422}
2423
2424const u8 tid_to_mac80211_ac[] = {
2425	IEEE80211_AC_BE,
2426	IEEE80211_AC_BK,
2427	IEEE80211_AC_BK,
2428	IEEE80211_AC_BE,
2429	IEEE80211_AC_VI,
2430	IEEE80211_AC_VI,
2431	IEEE80211_AC_VO,
2432	IEEE80211_AC_VO,
2433	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2434};
2435
2436static const u8 tid_to_ucode_ac[] = {
2437	AC_BE,
2438	AC_BK,
2439	AC_BK,
2440	AC_BE,
2441	AC_VI,
2442	AC_VI,
2443	AC_VO,
2444	AC_VO,
2445};
2446
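/*
 * mac80211 TX agg start: pick (or reuse) a queue for the TID, record
 * the starting SSN, and either start the session right away or wait
 * for the queue to drain to the SSN first.
 */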
2447int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2448			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2449{
2450	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2451	struct iwl_mvm_tid_data *tid_data;
2452	u16 normalized_ssn;
2453	int txq_id;
2454	int ret;
2455
2456	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2457		return -EINVAL;
2458
2459	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2460	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2461		IWL_ERR(mvm,
2462			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2463			mvmsta->tid_data[tid].state);
2464		return -ENXIO;
2465	}
2466
2467	lockdep_assert_held(&mvm->mutex);
2468
2469	spin_lock_bh(&mvmsta->lock);
2470
2471	/* possible race condition - we entered D0i3 while starting agg */
2472	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2473		spin_unlock_bh(&mvmsta->lock);
2474		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2475		return -EIO;
2476	}
2477
2478	spin_lock(&mvm->queue_info_lock);
2479
2480	/*
2481	 * Note the possible cases:
2482	 *  1. An enabled TXQ - TXQ needs to become agg'ed
2483	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
2484	 *	it as reserved
2485	 */
2486	txq_id = mvmsta->tid_data[tid].txq_id;
2487	if (txq_id == IWL_MVM_INVALID_QUEUE) {
2488		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2489						 IWL_MVM_DQA_MIN_DATA_QUEUE,
2490						 IWL_MVM_DQA_MAX_DATA_QUEUE);
2491		if (txq_id < 0) {
2492			ret = txq_id;
2493			IWL_ERR(mvm, "Failed to allocate agg queue\n");
2494			goto release_locks;
2495		}
2496
2497		/* TXQ hasn't yet been enabled, so mark it only as reserved */
2498		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2499	} else if (unlikely(mvm->queue_info[txq_id].status ==
2500			    IWL_MVM_QUEUE_SHARED)) {
2501		ret = -ENXIO;
2502		IWL_DEBUG_TX_QUEUES(mvm,
2503				    "Can't start tid %d agg on shared queue!\n",
2504				    tid);
2505		goto release_locks;
2506	}
2507
2508	spin_unlock(&mvm->queue_info_lock);
2509
2510	IWL_DEBUG_TX_QUEUES(mvm,
2511			    "AGG for tid %d will be on queue #%d\n",
2512			    tid, txq_id);
2513
2514	tid_data = &mvmsta->tid_data[tid];
2515	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2516	tid_data->txq_id = txq_id;
2517	*ssn = tid_data->ssn;
2518
2519	IWL_DEBUG_TX_QUEUES(mvm,
2520			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2521			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2522			    tid_data->next_reclaimed);
2523
2524	/*
2525	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
2526	 * to align the wrap around of ssn so we compare relevant values.
2527	 */
2528	normalized_ssn = tid_data->ssn;
2529	if (mvm->trans->cfg->gen2)
2530		normalized_ssn &= 0xff;
2531
2532	if (normalized_ssn == tid_data->next_reclaimed) {
2533		tid_data->state = IWL_AGG_STARTING;
2534		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2535	} else {
2536		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2537	}
2538
2539	ret = 0;
2540	goto out;
2541
2542release_locks:
2543	spin_unlock(&mvm->queue_info_lock);
2544out:
2545	spin_unlock_bh(&mvmsta->lock);
2546
2547	return ret;
2548}
2549
2550int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2551			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2552			    bool amsdu)
2553{
2554	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2555	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2556	unsigned int wdg_timeout =
2557		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2558	int queue, ret;
2559	bool alloc_queue = true;
2560	enum iwl_mvm_queue_status queue_status;
2561	u16 ssn;
2562
2563	struct iwl_trans_txq_scd_cfg cfg = {
2564		.sta_id = mvmsta->sta_id,
2565		.tid = tid,
2566		.frame_limit = buf_size,
2567		.aggregate = true,
2568	};
2569
2570	/*
2571	 * When the FW supports TLC_OFFLOAD, it also implements the Tx aggregation
2572	 * manager, so this function should never be called in that case.
2573	 */
2574	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
2575		return -EINVAL;
2576
2577	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2578		     != IWL_MAX_TID_COUNT);
2579
2580	spin_lock_bh(&mvmsta->lock);
2581	ssn = tid_data->ssn;
2582	queue = tid_data->txq_id;
2583	tid_data->state = IWL_AGG_ON;
2584	mvmsta->agg_tids |= BIT(tid);
2585	tid_data->ssn = 0xffff;
2586	tid_data->amsdu_in_ampdu_allowed = amsdu;
2587	spin_unlock_bh(&mvmsta->lock);
2588
2589	if (iwl_mvm_has_new_tx_api(mvm)) {
2590		/*
2591		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
2592		 * would have failed, so if we are here there is no need to
2593		 * allocate a queue.
2594		 * However, if the aggregation size is different from the default
2595		 * size, the scheduler should be reconfigured.
2596		 * We cannot do this with the new TX API, so return unsupported
2597		 * for now, until it is offloaded to firmware.
2598		 * Note that if SCD default value changes - this condition
2599		 * should be updated as well.
2600		 */
2601		if (buf_size < IWL_FRAME_LIMIT)
2602			return -ENOTSUPP;
2603
2604		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2605		if (ret)
2606			return -EIO;
2607		goto out;
2608	}
2609
2610	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2611
2612	spin_lock_bh(&mvm->queue_info_lock);
2613	queue_status = mvm->queue_info[queue].status;
2614	spin_unlock_bh(&mvm->queue_info_lock);
2615
2616	/* Maybe there is no need to even alloc a queue... */
2617	if (queue_status == IWL_MVM_QUEUE_READY)
2618		alloc_queue = false;
2619
2620	/*
2621	 * Only reconfigure the SCD for the queue if the window size has
2622	 * changed from the current one (i.e. become smaller).
2623	 */
2624	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
2625		/*
2626		 * If reconfiguring an existing queue, it first must be
2627		 * drained
2628		 */
2629		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2630						     BIT(queue));
2631		if (ret) {
2632			IWL_ERR(mvm,
2633				"Error draining queue before reconfig\n");
2634			return ret;
2635		}
2636
2637		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2638					   mvmsta->sta_id, tid,
2639					   buf_size, ssn);
2640		if (ret) {
2641			IWL_ERR(mvm,
2642				"Error reconfiguring TXQ #%d\n", queue);
2643			return ret;
2644		}
2645	}
2646
2647	if (alloc_queue)
2648		iwl_mvm_enable_txq(mvm, queue,
2649				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2650				   &cfg, wdg_timeout);
2651
2652	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
2653	if (queue_status != IWL_MVM_QUEUE_SHARED) {
2654		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2655		if (ret)
2656			return -EIO;
2657	}
2658
2659	/* No need to mark as reserved */
2660	spin_lock_bh(&mvm->queue_info_lock);
2661	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2662	spin_unlock_bh(&mvm->queue_info_lock);
2663
2664out:
2665	/*
2666	 * Even though in theory the peer could have different
2667	 * aggregation reorder buffer sizes for different sessions,
2668	 * our ucode doesn't allow for that and has a global limit
2669	 * for each station. Therefore, use the minimum of all the
2670	 * aggregation sessions and our default value.
2671	 */
2672	mvmsta->max_agg_bufsize =
2673		min(mvmsta->max_agg_bufsize, buf_size);
2674	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2675
2676	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2677		     sta->addr, tid);
2678
2679	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
2680}
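/*
 * Note (illustrative): because the ucode limit is global per station, a
 * peer that negotiates, say, a 64-frame window on TID 0 and later a
 * 16-frame window on TID 5 ends up capped at 16 frames for both sessions
 * by the min() above.
 */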
2681
2682static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
2683					struct iwl_mvm_sta *mvmsta,
2684					struct iwl_mvm_tid_data *tid_data)
2685{
2686	u16 txq_id = tid_data->txq_id;
2687
2688	if (iwl_mvm_has_new_tx_api(mvm))
2689		return;
2690
2691	spin_lock_bh(&mvm->queue_info_lock);
2692	/*
2693	 * The TXQ is marked as reserved only if no traffic came through yet.
2694	 * This means no traffic has been sent on this TID (agg'd or not), so
2695	 * we no longer have use for the queue. Since it hasn't even been
2696	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
2697	 * free.
2698	 */
2699	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
2700		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2701		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
2702	}
2703
2704	spin_unlock_bh(&mvm->queue_info_lock);
2705}
2706
2707int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2708			    struct ieee80211_sta *sta, u16 tid)
2709{
2710	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2711	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2712	u16 txq_id;
2713	int err;
2714
2715	/*
2716	 * If mac80211 is cleaning its state, then say that we finished since
2717	 * our state has been cleared anyway.
2718	 */
2719	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2720		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2721		return 0;
2722	}
2723
2724	spin_lock_bh(&mvmsta->lock);
2725
2726	txq_id = tid_data->txq_id;
2727
2728	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2729			    mvmsta->sta_id, tid, txq_id, tid_data->state);
2730
2731	mvmsta->agg_tids &= ~BIT(tid);
2732
2733	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
2734
2735	switch (tid_data->state) {
2736	case IWL_AGG_ON:
2737		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2738
2739		IWL_DEBUG_TX_QUEUES(mvm,
2740				    "ssn = %d, next_recl = %d\n",
2741				    tid_data->ssn, tid_data->next_reclaimed);
2742
2743		tid_data->ssn = 0xffff;
2744		tid_data->state = IWL_AGG_OFF;
2745		spin_unlock_bh(&mvmsta->lock);
2746
2747		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2748
2749		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2750		return 0;
2751	case IWL_AGG_STARTING:
2752	case IWL_EMPTYING_HW_QUEUE_ADDBA:
2753		/*
2754		 * The agg session has been stopped before it was set up. This
2755		 * can happen when the AddBA timer times out for example.
2756		 */
2757
2758		/* No barriers since we are under mutex */
2759		lockdep_assert_held(&mvm->mutex);
2760
2761		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2762		tid_data->state = IWL_AGG_OFF;
2763		err = 0;
2764		break;
2765	default:
2766		IWL_ERR(mvm,
2767			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2768			mvmsta->sta_id, tid, tid_data->state);
2769		IWL_ERR(mvm,
2770			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
2771		err = -EINVAL;
2772	}
2773
2774	spin_unlock_bh(&mvmsta->lock);
2775
2776	return err;
2777}
2778
2779int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2780			    struct ieee80211_sta *sta, u16 tid)
2781{
2782	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2783	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2784	u16 txq_id;
2785	enum iwl_mvm_agg_state old_state;
2786
2787	/*
2788	 * First set the agg state to OFF to avoid calling
2789	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2790	 */
2791	spin_lock_bh(&mvmsta->lock);
2792	txq_id = tid_data->txq_id;
2793	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2794			    mvmsta->sta_id, tid, txq_id, tid_data->state);
2795	old_state = tid_data->state;
2796	tid_data->state = IWL_AGG_OFF;
2797	mvmsta->agg_tids &= ~BIT(tid);
2798	spin_unlock_bh(&mvmsta->lock);
2799
2800	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
2801
2802	if (old_state >= IWL_AGG_ON) {
2803		iwl_mvm_drain_sta(mvm, mvmsta, true);
2804
2805		if (iwl_mvm_has_new_tx_api(mvm)) {
2806			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
2807						   BIT(tid), 0))
2808				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
2809			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
2810		} else {
2811			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
2812				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
2813			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
2814		}
2815
2816		iwl_mvm_drain_sta(mvm, mvmsta, false);
2817
2818		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2819	}
2820
2821	return 0;
2822}
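/*
 * A minimal sketch (assumed caller, not part of this file) of how the
 * four agg entry points above are reached from a mac80211 ampdu_action
 * callback; field names follow struct ieee80211_ampdu_params.
 */
static inline int example_ampdu_action(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_TX_START:
		return iwl_mvm_sta_tx_agg_start(mvm, vif, params->sta,
						params->tid, &params->ssn);
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		return iwl_mvm_sta_tx_agg_oper(mvm, vif, params->sta,
					       params->tid, params->buf_size,
					       params->amsdu);
	case IEEE80211_AMPDU_TX_STOP_CONT:
		return iwl_mvm_sta_tx_agg_stop(mvm, vif, params->sta,
					       params->tid);
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		return iwl_mvm_sta_tx_agg_flush(mvm, vif, params->sta,
						params->tid);
	default:
		return -EINVAL;
	}
}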
2823
2824static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2825{
2826	int i, max = -1, max_offs = -1;
2827
2828	lockdep_assert_held(&mvm->mutex);
2829
2830	/* Pick the unused key offset with the highest 'deleted'
2831	 * counter. Every time a key is deleted, all the counters
2832	 * are incremented and the one that was just deleted is
2833	 * reset to zero. Thus, the highest counter is the one
2834	 * that was deleted longest ago. Pick that one.
2835	 */
2836	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2837		if (test_bit(i, mvm->fw_key_table))
2838			continue;
2839		if (mvm->fw_key_deleted[i] > max) {
2840			max = mvm->fw_key_deleted[i];
2841			max_offs = i;
2842		}
2843	}
2844
2845	if (max_offs < 0)
2846		return STA_KEY_IDX_INVALID;
2847
2848	return max_offs;
2849}
2850
2851static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2852					       struct ieee80211_vif *vif,
2853					       struct ieee80211_sta *sta)
2854{
2855	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2856
2857	if (sta)
2858		return iwl_mvm_sta_from_mac80211(sta);
2859
2860	/*
2861	 * The device expects GTKs for station interfaces to be
2862	 * installed as GTKs for the AP station. If we have no
2863	 * station ID, then use AP's station ID.
2864	 */
2865	if (vif->type == NL80211_IFTYPE_STATION &&
2866	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
2867		u8 sta_id = mvmvif->ap_sta_id;
2868
2869		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2870					    lockdep_is_held(&mvm->mutex));
2871
2872		/*
2873		 * It is possible that the 'sta' parameter is NULL,
2874		 * for example when a GTK is removed - the sta_id will then
2875		 * be the AP ID, and no station was passed by mac80211.
2876		 */
2877		if (IS_ERR_OR_NULL(sta))
2878			return NULL;
2879
2880		return iwl_mvm_sta_from_mac80211(sta);
2881	}
2882
2883	return NULL;
2884}
2885
2886static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
2887				u32 sta_id,
2888				struct ieee80211_key_conf *key, bool mcast,
2889				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
2890				u8 key_offset)
2891{
2892	union {
2893		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2894		struct iwl_mvm_add_sta_key_cmd cmd;
2895	} u = {};
2896	__le16 key_flags;
2897	int ret;
2898	u32 status;
2899	u16 keyidx;
2900	u64 pn = 0;
2901	int i, size;
2902	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2903				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2904
2905	if (sta_id == IWL_MVM_INVALID_STA)
2906		return -EINVAL;
2907
2908	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
2909		 STA_KEY_FLG_KEYID_MSK;
2910	key_flags = cpu_to_le16(keyidx);
2911	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
2912
2913	switch (key->cipher) {
2914	case WLAN_CIPHER_SUITE_TKIP:
2915		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
2916		if (new_api) {
2917			memcpy((void *)&u.cmd.tx_mic_key,
2918			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
2919			       IWL_MIC_KEY_SIZE);
2920
2921			memcpy((void *)&u.cmd.rx_mic_key,
2922			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
2923			       IWL_MIC_KEY_SIZE);
2924			pn = atomic64_read(&key->tx_pn);
2925
2926		} else {
2927			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
2928			for (i = 0; i < 5; i++)
2929				u.cmd_v1.tkip_rx_ttak[i] =
2930					cpu_to_le16(tkip_p1k[i]);
2931		}
2932		memcpy(u.cmd.common.key, key->key, key->keylen);
2933		break;
2934	case WLAN_CIPHER_SUITE_CCMP:
2935		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
2936		memcpy(u.cmd.common.key, key->key, key->keylen);
2937		if (new_api)
2938			pn = atomic64_read(&key->tx_pn);
2939		break;
2940	case WLAN_CIPHER_SUITE_WEP104:
2941		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
2942		/* fall through */
2943	case WLAN_CIPHER_SUITE_WEP40:
2944		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
2945		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
2946		break;
2947	case WLAN_CIPHER_SUITE_GCMP_256:
2948		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
2949		/* fall through */
2950	case WLAN_CIPHER_SUITE_GCMP:
2951		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
2952		memcpy(u.cmd.common.key, key->key, key->keylen);
2953		if (new_api)
2954			pn = atomic64_read(&key->tx_pn);
2955		break;
2956	default:
2957		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
2958		memcpy(u.cmd.common.key, key->key, key->keylen);
2959	}
2960
2961	if (mcast)
2962		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2963
2964	u.cmd.common.key_offset = key_offset;
2965	u.cmd.common.key_flags = key_flags;
2966	u.cmd.common.sta_id = sta_id;
2967
2968	if (new_api) {
2969		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
2970		size = sizeof(u.cmd);
2971	} else {
2972		size = sizeof(u.cmd_v1);
2973	}
2974
2975	status = ADD_STA_SUCCESS;
2976	if (cmd_flags & CMD_ASYNC)
2977		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
2978					   &u.cmd);
2979	else
2980		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
2981						  &u.cmd, &status);
2982
2983	switch (status) {
2984	case ADD_STA_SUCCESS:
2985		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
2986		break;
2987	default:
2988		ret = -EIO;
2989		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
2990		break;
2991	}
2992
2993	return ret;
2994}
2995
2996static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
2997				 struct ieee80211_key_conf *keyconf,
2998				 u8 sta_id, bool remove_key)
2999{
3000	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3001
3002	/* verify the key details match the required command's expectations */
3003	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3004		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3005		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3006		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3007		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3008		return -EINVAL;
3009
3010	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3011		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3012		return -EINVAL;
3013
3014	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3015	igtk_cmd.sta_id = cpu_to_le32(sta_id);
3016
3017	if (remove_key) {
3018		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3019	} else {
3020		struct ieee80211_key_seq seq;
3021		const u8 *pn;
3022
3023		switch (keyconf->cipher) {
3024		case WLAN_CIPHER_SUITE_AES_CMAC:
3025			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3026			break;
3027		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3028		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3029			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3030			break;
3031		default:
3032			return -EINVAL;
3033		}
3034
3035		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3036		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3037			igtk_cmd.ctrl_flags |=
3038				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3039		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3040		pn = seq.aes_cmac.pn;
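		/*
		 * mac80211 stores the IPN big-endian here (pn[0] is the
		 * most significant byte), so the shifts below rebuild the
		 * 48-bit counter as a host integer before cpu_to_le64().
		 */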
3041		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3042						       ((u64) pn[4] << 8) |
3043						       ((u64) pn[3] << 16) |
3044						       ((u64) pn[2] << 24) |
3045						       ((u64) pn[1] << 32) |
3046						       ((u64) pn[0] << 40));
3047	}
3048
3049	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3050		       remove_key ? "removing" : "installing",
3051		       igtk_cmd.sta_id);
3052
3053	if (!iwl_mvm_has_new_rx_api(mvm)) {
3054		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3055			.ctrl_flags = igtk_cmd.ctrl_flags,
3056			.key_id = igtk_cmd.key_id,
3057			.sta_id = igtk_cmd.sta_id,
3058			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
3059		};
3060
3061		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3062		       ARRAY_SIZE(igtk_cmd_v1.igtk));
3063		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3064					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3065	}
3066	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3067				    sizeof(igtk_cmd), &igtk_cmd);
3068}
3069
3070
3071static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3072				       struct ieee80211_vif *vif,
3073				       struct ieee80211_sta *sta)
3074{
3075	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3076
3077	if (sta)
3078		return sta->addr;
3079
3080	if (vif->type == NL80211_IFTYPE_STATION &&
3081	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3082		u8 sta_id = mvmvif->ap_sta_id;
3083		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3084						lockdep_is_held(&mvm->mutex));
3085		return sta->addr;
3086	}
3087
3088
3089	return NULL;
3090}
3091
3092static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3093				 struct ieee80211_vif *vif,
3094				 struct ieee80211_sta *sta,
3095				 struct ieee80211_key_conf *keyconf,
3096				 u8 key_offset,
3097				 bool mcast)
3098{
3099	int ret;
3100	const u8 *addr;
3101	struct ieee80211_key_seq seq;
3102	u16 p1k[5];
3103	u32 sta_id;
3104
3105	if (sta) {
3106		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3107
3108		sta_id = mvm_sta->sta_id;
3109	} else if (vif->type == NL80211_IFTYPE_AP &&
3110		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3111		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3112
3113		sta_id = mvmvif->mcast_sta.sta_id;
3114	} else {
3115		IWL_ERR(mvm, "Failed to find station id\n");
3116		return -EINVAL;
3117	}
3118
3119	switch (keyconf->cipher) {
3120	case WLAN_CIPHER_SUITE_TKIP:
3121		if (vif->type == NL80211_IFTYPE_AP) {
3122			ret = -EINVAL;
3123			break;
3124		}
3125		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3126		/* get phase 1 key from mac80211 */
3127		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3128		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
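		/*
		 * p1k now holds the 5-word TKIP phase-1 key (TTAK) for the
		 * current IV32; on pre-TKIP_MIC_KEYS firmware it is passed
		 * down via the tkip_rx_ttak field in iwl_mvm_send_sta_key().
		 */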
3129		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3130					   seq.tkip.iv32, p1k, 0, key_offset);
3131		break;
3132	case WLAN_CIPHER_SUITE_CCMP:
3133	case WLAN_CIPHER_SUITE_WEP40:
3134	case WLAN_CIPHER_SUITE_WEP104:
3135	case WLAN_CIPHER_SUITE_GCMP:
3136	case WLAN_CIPHER_SUITE_GCMP_256:
3137		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3138					   0, NULL, 0, key_offset);
3139		break;
3140	default:
3141		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3142					   0, NULL, 0, key_offset);
3143	}
3144
3145	return ret;
3146}
3147
3148static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
3149				    struct ieee80211_key_conf *keyconf,
3150				    bool mcast)
3151{
3152	union {
3153		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3154		struct iwl_mvm_add_sta_key_cmd cmd;
3155	} u = {};
3156	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3157				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3158	__le16 key_flags;
3159	int ret, size;
3160	u32 status;
3161
3162	/* This is a valid situation for GTK removal */
3163	if (sta_id == IWL_MVM_INVALID_STA)
3164		return 0;
3165
3166	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3167				 STA_KEY_FLG_KEYID_MSK);
3168	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3169	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3170
3171	if (mcast)
3172		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3173
3174	/*
3175	 * The fields assigned here are in the same location at the start
3176	 * of the command, so we can do this union trick.
3177	 */
3178	u.cmd.common.key_flags = key_flags;
3179	u.cmd.common.key_offset = keyconf->hw_key_idx;
3180	u.cmd.common.sta_id = sta_id;
3181
3182	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
3183
3184	status = ADD_STA_SUCCESS;
3185	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3186					  &status);
3187
3188	switch (status) {
3189	case ADD_STA_SUCCESS:
3190		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3191		break;
3192	default:
3193		ret = -EIO;
3194		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3195		break;
3196	}
3197
3198	return ret;
3199}
3200
3201int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3202			struct ieee80211_vif *vif,
3203			struct ieee80211_sta *sta,
3204			struct ieee80211_key_conf *keyconf,
3205			u8 key_offset)
3206{
3207	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3208	struct iwl_mvm_sta *mvm_sta;
3209	u8 sta_id = IWL_MVM_INVALID_STA;
3210	int ret;
3211	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3212
3213	lockdep_assert_held(&mvm->mutex);
3214
3215	if (vif->type != NL80211_IFTYPE_AP ||
3216	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3217		/* Get the station id from the mvm local station table */
3218		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3219		if (!mvm_sta) {
3220			IWL_ERR(mvm, "Failed to find station\n");
3221			return -EINVAL;
3222		}
3223		sta_id = mvm_sta->sta_id;
3224
3225		/*
3226		 * It is possible that the 'sta' parameter is NULL, and thus
3227		 * there is a need to retrieve the sta from the local station
3228		 * table.
3229		 */
3230		if (!sta) {
3231			sta = rcu_dereference_protected(
3232				mvm->fw_id_to_mac_id[sta_id],
3233				lockdep_is_held(&mvm->mutex));
3234			if (IS_ERR_OR_NULL(sta)) {
3235				IWL_ERR(mvm, "Invalid station id\n");
3236				return -EINVAL;
3237			}
3238		}
3239
3240		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3241			return -EINVAL;
3242	} else {
3243		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3244
3245		sta_id = mvmvif->mcast_sta.sta_id;
3246	}
3247
3248	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3249	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3250	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3251		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3252		goto end;
3253	}
3254
3255	/* If the key_offset is not pre-assigned, we need to find a
3256	 * new offset to use.  In normal cases, the offset is not
3257	 * pre-assigned, but during HW_RESTART we want to reuse the
3258	 * same indices, so we pass them when this function is called.
3259	 *
3260	 * In D3 entry, we need to hardcode the indices (because the
3261	 * firmware hardcodes the PTK offset to 0).  In this case, we
3262	 * need to make sure we don't overwrite the hw_key_idx in the
3263	 * keyconf structure, because otherwise we cannot configure
3264	 * the original ones back when resuming.
3265	 */
3266	if (key_offset == STA_KEY_IDX_INVALID) {
3267		key_offset  = iwl_mvm_set_fw_key_idx(mvm);
3268		if (key_offset == STA_KEY_IDX_INVALID)
3269			return -ENOSPC;
3270		keyconf->hw_key_idx = key_offset;
3271	}
3272
3273	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3274	if (ret)
3275		goto end;
3276
3277	/*
3278	 * For WEP, the same key is used for multicast and unicast. Upload it
3279	 * again, using the same key offset, and now pointing the other one
3280	 * to the same key slot (offset).
3281	 * If this fails, remove the original as well.
3282	 */
3283	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3284	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3285	    sta) {
3286		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3287					    key_offset, !mcast);
3288		if (ret) {
3289			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3290			goto end;
3291		}
3292	}
3293
3294	__set_bit(key_offset, mvm->fw_key_table);
3295
3296end:
3297	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3298		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3299		      sta ? sta->addr : zero_addr, ret);
3300	return ret;
3301}
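/*
 * A minimal sketch (assumed caller, not part of this file) of the
 * key_offset convention described above: pass STA_KEY_IDX_INVALID in the
 * normal case, and reuse the previous index across a HW restart:
 *
 *	u8 key_offset = STA_KEY_IDX_INVALID;
 *
 *	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
 *		key_offset = keyconf->hw_key_idx;
 *
 *	ret = iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset);
 */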
3302
3303int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3304			   struct ieee80211_vif *vif,
3305			   struct ieee80211_sta *sta,
3306			   struct ieee80211_key_conf *keyconf)
3307{
3308	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3309	struct iwl_mvm_sta *mvm_sta;
3310	u8 sta_id = IWL_MVM_INVALID_STA;
3311	int ret, i;
3312
3313	lockdep_assert_held(&mvm->mutex);
3314
3315	/* Get the station from the mvm local station table */
3316	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3317	if (mvm_sta)
3318		sta_id = mvm_sta->sta_id;
3319	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3320		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3321
3322
3323	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3324		      keyconf->keyidx, sta_id);
3325
3326	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3327			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3328			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
3329		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3330
3331	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3332		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3333			keyconf->hw_key_idx);
3334		return -ENOENT;
3335	}
3336
3337	/* track which key was deleted last */
3338	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3339		if (mvm->fw_key_deleted[i] < U8_MAX)
3340			mvm->fw_key_deleted[i]++;
3341	}
3342	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3343
3344	if (sta && !mvm_sta) {
3345		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3346		return 0;
3347	}
3348
3349	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3350	if (ret)
3351		return ret;
3352
3353	/* delete WEP key twice to get rid of (now useless) offset */
3354	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3355	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3356		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3357
3358	return ret;
3359}
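/*
 * A standalone sketch (not part of this file) of the fw_key_deleted[]
 * aging shared by iwl_mvm_set_fw_key_idx() and the removal path above:
 * every deletion ages all counters and zeroes the freed slot, so the
 * free slot with the largest counter is the one deleted longest ago.
 *
 *	static u8 deleted[STA_KEY_MAX_NUM];
 *
 *	static void on_key_removed(int idx)
 *	{
 *		int i;
 *
 *		for (i = 0; i < STA_KEY_MAX_NUM; i++)
 *			if (deleted[i] < U8_MAX)
 *				deleted[i]++;
 *		deleted[idx] = 0;
 *	}
 */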
3360
3361void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3362			     struct ieee80211_vif *vif,
3363			     struct ieee80211_key_conf *keyconf,
3364			     struct ieee80211_sta *sta, u32 iv32,
3365			     u16 *phase1key)
3366{
3367	struct iwl_mvm_sta *mvm_sta;
3368	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3369
3370	rcu_read_lock();
3371
3372	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3373	if (WARN_ON_ONCE(!mvm_sta))
3374		goto unlock;
3375	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3376			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
3377
3378 unlock:
3379	rcu_read_unlock();
3380}
3381
3382void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3383				struct ieee80211_sta *sta)
3384{
3385	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3386	struct iwl_mvm_add_sta_cmd cmd = {
3387		.add_modify = STA_MODE_MODIFY,
3388		.sta_id = mvmsta->sta_id,
3389		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
3390		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3391	};
3392	int ret;
3393
3394	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3395				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3396	if (ret)
3397		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3398}
3399
3400void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3401				       struct ieee80211_sta *sta,
3402				       enum ieee80211_frame_release_type reason,
3403				       u16 cnt, u16 tids, bool more_data,
3404				       bool single_sta_queue)
3405{
3406	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3407	struct iwl_mvm_add_sta_cmd cmd = {
3408		.add_modify = STA_MODE_MODIFY,
3409		.sta_id = mvmsta->sta_id,
3410		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3411		.sleep_tx_count = cpu_to_le16(cnt),
3412		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3413	};
3414	int tid, ret;
3415	unsigned long _tids = tids;
3416
3417	/* convert TIDs to ACs - we don't support TSPEC so that's OK.
3418	 * Note that this field is reserved and unused by firmware not
3419	 * supporting GO uAPSD, so it's safe to always do this.
3420	 */
3421	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3422		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3423
3424	/* If we're releasing frames from aggregation or dqa queues then check
3425	 * if all the queues that we're releasing frames from, combined, have:
3426	 *  - more frames than the service period, in which case more_data
3427	 *    needs to be set
3428	 *  - fewer than 'cnt' frames, in which case we need to adjust the
3429	 *    firmware command (but do that unconditionally)
3430	 */
3431	if (single_sta_queue) {
3432		int remaining = cnt;
3433		int sleep_tx_count;
3434
3435		spin_lock_bh(&mvmsta->lock);
3436		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3437			struct iwl_mvm_tid_data *tid_data;
3438			u16 n_queued;
3439
3440			tid_data = &mvmsta->tid_data[tid];
3441
3442			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3443			if (n_queued > remaining) {
3444				more_data = true;
3445				remaining = 0;
3446				break;
3447			}
3448			remaining -= n_queued;
3449		}
3450		sleep_tx_count = cnt - remaining;
3451		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3452			mvmsta->sleep_tx_count = sleep_tx_count;
3453		spin_unlock_bh(&mvmsta->lock);
3454
3455		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3456		if (WARN_ON(cnt - remaining == 0)) {
3457			ieee80211_sta_eosp(sta);
3458			return;
3459		}
3460	}
3461
3462	/* Note: this is ignored by firmware not supporting GO uAPSD */
3463	if (more_data)
3464		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3465
3466	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3467		mvmsta->next_status_eosp = true;
3468		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3469	} else {
3470		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3471	}
3472
3473	/* block the Tx queues until the FW updated the sleep Tx count */
3474	iwl_trans_block_txq_ptrs(mvm->trans, true);
3475
3476	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3477				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3478				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3479	if (ret)
3480		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3481}
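/*
 * A minimal sketch (assumed caller, not part of this file): releasing two
 * buffered frames on the uAPSD-enabled voice TIDs during a service
 * period. With single_sta_queue set, the helper above recomputes the real
 * release count and MORE_DATA from the per-TID queue depths.
 */
static inline void example_release_uapsd(struct iwl_mvm *mvm,
					 struct ieee80211_sta *sta)
{
	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					  IEEE80211_FRAME_RELEASE_UAPSD,
					  2, BIT(6) | BIT(7),
					  false, true);
}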
3482
3483void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3484			   struct iwl_rx_cmd_buffer *rxb)
3485{
3486	struct iwl_rx_packet *pkt = rxb_addr(rxb);
3487	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3488	struct ieee80211_sta *sta;
3489	u32 sta_id = le32_to_cpu(notif->sta_id);
3490
3491	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
3492		return;
3493
3494	rcu_read_lock();
3495	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3496	if (!IS_ERR_OR_NULL(sta))
3497		ieee80211_sta_eosp(sta);
3498	rcu_read_unlock();
3499}
3500
3501void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3502				   struct iwl_mvm_sta *mvmsta, bool disable)
3503{
3504	struct iwl_mvm_add_sta_cmd cmd = {
3505		.add_modify = STA_MODE_MODIFY,
3506		.sta_id = mvmsta->sta_id,
3507		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3508		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3509		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3510	};
3511	int ret;
3512
3513	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3514				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3515	if (ret)
3516		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3517}
3518
3519void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3520				      struct ieee80211_sta *sta,
3521				      bool disable)
3522{
3523	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3524
3525	spin_lock_bh(&mvm_sta->lock);
3526
3527	if (mvm_sta->disable_tx == disable) {
3528		spin_unlock_bh(&mvm_sta->lock);
3529		return;
3530	}
3531
3532	mvm_sta->disable_tx = disable;
3533
3534	/* Tell mac80211 to start/stop queuing tx for this station */
3535	ieee80211_sta_block_awake(mvm->hw, sta, disable);
3536
3537	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3538
3539	spin_unlock_bh(&mvm_sta->lock);
3540}
3541
3542static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3543					      struct iwl_mvm_vif *mvmvif,
3544					      struct iwl_mvm_int_sta *sta,
3545					      bool disable)
3546{
3547	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3548	struct iwl_mvm_add_sta_cmd cmd = {
3549		.add_modify = STA_MODE_MODIFY,
3550		.sta_id = sta->sta_id,
3551		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3552		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3553		.mac_id_n_color = cpu_to_le32(id),
3554	};
3555	int ret;
3556
3557	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
3558				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3559	if (ret)
3560		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3561}
3562
3563void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3564				       struct iwl_mvm_vif *mvmvif,
3565				       bool disable)
3566{
3567	struct ieee80211_sta *sta;
3568	struct iwl_mvm_sta *mvm_sta;
3569	int i;
3570
3571	lockdep_assert_held(&mvm->mutex);
3572
3573	/* Block/unblock all the stations of the given mvmvif */
3574	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
3575		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3576						lockdep_is_held(&mvm->mutex));
3577		if (IS_ERR_OR_NULL(sta))
3578			continue;
3579
3580		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3581		if (mvm_sta->mac_id_n_color !=
3582		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3583			continue;
3584
3585		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3586	}
3587
3588	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3589		return;
3590
3591	/* Need to block/unblock also multicast station */
3592	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3593		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3594						  &mvmvif->mcast_sta, disable);
3595
3596	/*
3597	 * Only unblock the broadcast station (FW blocks it for immediate
3598	 * quiet, not the driver)
3599	 */
3600	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3601		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3602						  &mvmvif->bcast_sta, disable);
3603}
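/*
 * Usage note (illustrative): around a channel switch, an AP interface
 * would quiesce and later resume all of its clients with the helper
 * above:
 *
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
 *	... perform the switch ...
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
 */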
3604
3605void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3606{
3607	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3608	struct iwl_mvm_sta *mvmsta;
3609
3610	rcu_read_lock();
3611
3612	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3613
3614	if (!WARN_ON(!mvmsta))
3615		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3616
3617	rcu_read_unlock();
3618}
3619
3620u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3621{
3622	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3623
3624	/*
3625	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
3626	 * to align the wrap around of ssn so we compare relevant values.
3627	 */
3628	if (mvm->trans->cfg->gen2)
3629		sn &= 0xff;
3630
3631	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
3632}
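/*
 * A standalone sketch (not part of this file) of why the 8-bit
 * normalization in iwl_mvm_tid_queued() matters on gen2 HW, where
 * next_reclaimed wraps at 256 while the 802.11 SN space is 12 bits
 * (ieee80211_sn_sub() masks with 0xfff):
 *
 *	u16 sn = 0x105;			// 12-bit SN from seq_number
 *	u16 next_reclaimed = 0x05;	// 8-bit index from gen2 HW
 *
 *	(sn - next_reclaimed) & 0xfff;		// 256 frames "queued" (wrong)
 *	((sn & 0xff) - next_reclaimed) & 0xfff;	// 0 frames queued (right)
 */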