// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "fw/api/rs.h"
#include "fw/img.h"

/*
 * Will return 0 even if the cmd failed when RFKILL is asserted unless
 * CMD_WANT_SKB is set in cmd->flags.
 */
int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
	int ret;

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex, this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC))
		lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_send_cmd(mvm->trans, cmd);

	/*
	 * If the caller wants the SKB, then don't hide any problems, the
	 * caller might access the response buffer which will be NULL if
	 * the command failed.
	 */
	if (cmd->flags & CMD_WANT_SKB)
		return ret;

	/*
	 * Silently ignore failures if RFKILL is asserted or
	 * we are in the suspend/resume process
	 */
	if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
		return 0;
	return ret;
}

int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
			 u32 flags, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}
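
/*
 * Illustrative usage sketch (not part of the original file): sending a
 * payload-less command by id, in the style of the driver's debugfs echo
 * test. ECHO_CMD is assumed here to be a valid command id in this build.
 *
 *	ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
 *	if (ret)
 *		IWL_ERR(mvm, "echo command failed: %d\n", ret);
 */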

/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
			    u32 *status)
{
	struct iwl_rx_packet *pkt;
	struct iwl_cmd_response *resp;
	int ret, resp_len;

	lockdep_assert_held(&mvm->mutex);

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Only synchronous commands can wait for status,
	 * we use WANT_SKB so the caller can't.
	 */
	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
		      "cmd flags %x", cmd->flags))
		return -EINVAL;

	cmd->flags |= CMD_WANT_SKB;

	ret = iwl_trans_send_cmd(mvm->trans, cmd);
	if (ret == -ERFKILL) {
		/*
		 * The command failed because of RFKILL, don't update
		 * the status, leave it as success and return 0.
		 */
		return 0;
	} else if (ret) {
		return ret;
	}

	pkt = cmd->resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		ret = -EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32_to_cpu(resp->status);
 out_free_resp:
	iwl_free_resp(cmd);
	return ret;
}

/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
				const void *data, u32 *status)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
	};

	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}
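
/*
 * Illustrative usage sketch (cmd_id and payload are hypothetical names,
 * shown for clarity): per the comment above, the caller pre-loads *status
 * with the success value before the call, since an RFKILL failure leaves
 * it untouched.
 *
 *	u32 status = 0;
 *	int ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(payload),
 *					      &payload, &status);
 *	if (ret || status)
 *		IWL_ERR(mvm, "cmd failed: ret=%d status=%u\n", ret, status);
 */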

int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
					  enum nl80211_band band)
{
	int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
	bool is_LB = band == NL80211_BAND_2GHZ;

	if (format == RATE_MCS_LEGACY_OFDM_MSK)
		return is_LB ? rate + IWL_FIRST_OFDM_RATE :
			rate;

	/* CCK is not allowed in HB */
	return is_LB ? rate : -1;
}

int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum nl80211_band band)
{
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
	int idx;
	int band_offset = 0;

	/* Legacy rate format, search for match in table */
	if (band != NL80211_BAND_2GHZ)
		band_offset = IWL_FIRST_OFDM_RATE;
	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
		if (iwl_fw_rate_idx_to_plcp(idx) == rate)
			return idx - band_offset;

	return -1;
}

u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
	if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
		/* In the new rate format, legacy rates are indexed:
		 * 0 - 3 for CCK and 0 - 7 for OFDM.
		 */
		return (rate_idx >= IWL_FIRST_OFDM_RATE ?
			rate_idx - IWL_FIRST_OFDM_RATE :
			rate_idx);

	return iwl_fw_rate_idx_to_plcp(rate_idx);
}

u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
{
	static const u8 mac80211_ac_to_ucode_ac[] = {
		AC_VO,
		AC_VI,
		AC_BE,
		AC_BK
	};

	return mac80211_ac_to_ucode_ac[ac];
}
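
/*
 * Worked example: mac80211 numbers its ACs VO=0, VI=1, BE=2, BK=3, so the
 * table above simply re-labels each index with the firmware's AC_* value,
 * e.g. iwl_mvm_mac80211_ac_to_ucode_ac(IEEE80211_AC_VI) returns AC_VI.
 */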

void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_error_resp *err_resp = (void *)pkt->data;

	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
		le16_to_cpu(err_resp->bad_cmd_seq_num),
		le32_to_cpu(err_resp->error_service));
	IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
		le64_to_cpu(err_resp->timestamp));
}

/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should also be a combination of ANT_[ABC].
 */
u8 first_antenna(u8 mask)
{
	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
		return BIT(0);
	return BIT(ffs(mask) - 1);
}
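
/*
 * Worked example (assuming the iwl-config.h encoding ANT_A == BIT(0),
 * ANT_B == BIT(1), ANT_C == BIT(2)): first_antenna(ANT_B | ANT_C)
 * returns ANT_B, since ffs() finds the lowest set bit.
 */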

#define MAX_ANT_NUM 2
/*
 * Toggles between TX antennas to send the probe request on.
 * Receives the bitmask of valid TX antennas and the *index* used
 * for the last TX, and returns the next valid *index* to use.
 * In order to set it in the tx_cmd, must do BIT(idx).
 */
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
{
	u8 ind = last_idx;
	int i;

	for (i = 0; i < MAX_ANT_NUM; i++) {
		ind = (ind + 1) % MAX_ANT_NUM;
		if (valid & BIT(ind))
			return ind;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
	return last_idx;
}
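
/*
 * Worked example: with valid == ANT_A | ANT_B (0x3) and last_idx == 0,
 * the loop tries index (0 + 1) % 2 == 1 first; BIT(1) is set in valid,
 * so 1 is returned and the caller puts BIT(1) in the tx_cmd.
 */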

/**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @mvm: Driver data.
 * @lq: Link quality command to send.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set and we call a callback in
 * this case to clear the state indicating that station creation is in
 * progress.
 *
 * Returns: an error code indicating success or failure
 */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
	struct iwl_host_cmd cmd = {
		.id = LQ_CMD,
		.len = { sizeof(struct iwl_lq_cmd), },
		.flags = CMD_ASYNC,
		.data = { lq, },
	};

	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
		    iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	return iwl_mvm_send_cmd(mvm, &cmd);
}

/**
 * iwl_mvm_update_smps - Apply a request to change the SMPS mode
 * @mvm: Driver data.
 * @vif: Pointer to the ieee80211_vif structure.
 * @req_type: The part of the driver that requested the change.
 * @smps_request: The requested SMPS mode.
 * @link_id: for MLO link_id, otherwise 0 (deflink)
 *
 * Record a request to change the SMPS mode,
 * and set the mode according to all other requests in the driver.
 */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum iwl_mvm_smps_type_request req_type,
			 enum ieee80211_smps_mode smps_request,
			 unsigned int link_id)
{
	struct iwl_mvm_vif *mvmvif;
	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (WARN_ON_ONCE(!mvmvif->link[link_id]))
		return;

	mvmvif->link[link_id]->smps_requests[req_type] = smps_request;
	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->link[link_id]->smps_requests[i] ==
		    IEEE80211_SMPS_STATIC) {
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		}
		if (mvmvif->link[link_id]->smps_requests[i] ==
		    IEEE80211_SMPS_DYNAMIC)
			smps_mode = IEEE80211_SMPS_DYNAMIC;
	}

	/* SMPS is disabled in eSR */
	if (mvmvif->esr_active)
		smps_mode = IEEE80211_SMPS_OFF;

	ieee80211_request_smps(vif, link_id, smps_mode);
}
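
/*
 * Illustrative call (hypothetical caller, shown for clarity): a BT-coex
 * request for dynamic SMPS on the default link would look like
 *
 *	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
 *			    IEEE80211_SMPS_DYNAMIC, 0);
 *
 * STATIC wins over DYNAMIC in the aggregation loop above, so the final
 * mode reflects the strictest outstanding request.
 */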

void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 enum iwl_mvm_smps_type_request req_type,
					 enum ieee80211_smps_mode smps_request)
{
	struct ieee80211_bss_conf *link_conf;
	unsigned int link_id;

	rcu_read_lock();
	for_each_vif_active_link(vif, link_conf, link_id)
		iwl_mvm_update_smps(mvm, vif, req_type, smps_request,
				    link_id);
	rcu_read_unlock();
}

static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
				    struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);

	return true;
}

static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
					     u8 cmd_ver)
{
	struct iwl_system_statistics_cmd system_cmd = {
		.cfg_mask = clear ?
			    cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) :
			    cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK |
					IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK),
		.type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER |
					    IWL_STATS_NTFY_TYPE_ID_OPER_PART1),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD),
		.len[0] = sizeof(system_cmd),
		.data[0] = &system_cmd,
	};
	struct iwl_notification_wait stats_wait;
	static const u16 stats_complete[] = {
		WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
	};
	int ret;

	if (cmd_ver != 1) {
		IWL_FW_CHECK_FAILED(mvm,
				    "Invalid system statistics command version:%d\n",
				    cmd_ver);
		return -EOPNOTSUPP;
	}

	iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
				   stats_complete, ARRAY_SIZE(stats_complete),
				   NULL, NULL);

	mvm->statistics_clear = clear;
	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		iwl_remove_notification(&mvm->notif_wait, &stats_wait);
		return ret;
	}

	/* 500ms for OPERATIONAL, PART1 and END notification should be enough
	 * for FW to collect data from all LMACs and send
	 * STATISTICS_NOTIFICATION to host
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2);
	if (ret)
		return ret;

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return ret;
}

int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
	struct iwl_statistics_cmd scmd = {
		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
	};

	struct iwl_host_cmd cmd = {
		.id = STATISTICS_CMD,
		.len[0] = sizeof(scmd),
		.data[0] = &scmd,
	};
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
					   WIDE_ID(SYSTEM_GROUP,
						   SYSTEM_STATISTICS_CMD),
					   IWL_FW_CMD_VER_UNKNOWN);
	int ret;

	if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
		return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);

	/* From version 15 of STATISTICS_NOTIFICATION, the reply to
	 * STATISTICS_CMD is empty, and the data arrives in a separate
	 * STATISTICS_NOTIFICATION
	 */
	if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
				    STATISTICS_NOTIFICATION, 0) < 15) {
		cmd.flags = CMD_WANT_SKB;

		ret = iwl_mvm_send_cmd(mvm, &cmd);
		if (ret)
			return ret;

		iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
		iwl_free_resp(&cmd);
	} else {
		struct iwl_notification_wait stats_wait;
		static const u16 stats_complete[] = {
			STATISTICS_NOTIFICATION,
		};

		iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
					   stats_complete, ARRAY_SIZE(stats_complete),
					   iwl_wait_stats_complete, NULL);

		ret = iwl_mvm_send_cmd(mvm, &cmd);
		if (ret) {
			iwl_remove_notification(&mvm->notif_wait, &stats_wait);
			return ret;
		}

		/* 200ms should be enough for FW to collect data from all
		 * LMACs and send STATISTICS_NOTIFICATION to host
		 */
		ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
		if (ret)
			return ret;
	}

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return 0;
}

void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

struct iwl_mvm_diversity_iter_data {
	struct iwl_mvm_phy_ctxt *ctxt;
	bool result;
};

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_diversity_iter_data *data = _data;
	int i, link_id;

	for_each_mvm_vif_valid_link(mvmvif, link_id) {
		struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];

		if (link_info->phy_ctxt != data->ctxt)
			continue;

		for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
			if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC ||
			    link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
				data->result = false;
				break;
			}
		}
	}
}

bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
				  struct iwl_mvm_phy_ctxt *ctxt)
{
	struct iwl_mvm_diversity_iter_data data = {
		.ctxt = ctxt,
		.result = true,
	};

	lockdep_assert_held(&mvm->mutex);

	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		return false;

	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return false;

	if (mvm->cfg->rx_with_siso_diversity)
		return false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_diversity_iter, &data);

	return data.result;
}

void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
				  bool low_latency, u16 mac_id)
{
	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id)
	};

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
		return;

	if (low_latency) {
		/* currently we don't care about the direction */
		cmd.low_latency_rx = 1;
		cmd.low_latency_tx = 1;
	}

	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "Failed to send low latency command\n");
}

int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool low_latency,
			       enum iwl_mvm_low_latency_cause cause)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int res;
	bool prev;

	lockdep_assert_held(&mvm->mutex);

	prev = iwl_mvm_vif_low_latency(mvmvif);
	iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);

	low_latency = iwl_mvm_vif_low_latency(mvmvif);

	if (low_latency == prev)
		return 0;

	iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);

	res = iwl_mvm_update_quotas(mvm, false, NULL);
	if (res)
		return res;

	iwl_mvm_bt_coex_vif_change(mvm);

	return iwl_mvm_power_update_mac(mvm);
}

struct iwl_mvm_low_latency_iter {
	bool result;
	bool result_per_band[NUM_NL80211_BANDS];
};

static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm_low_latency_iter *result = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	enum nl80211_band band;

	if (iwl_mvm_vif_low_latency(mvmvif)) {
		result->result = true;

		if (!mvmvif->deflink.phy_ctxt)
			return;

		band = mvmvif->deflink.phy_ctxt->channel->band;
		result->result_per_band[band] = true;
	}
}

bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
	struct iwl_mvm_low_latency_iter data = {};

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &data);

	return data.result;
}

bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
{
	struct iwl_mvm_low_latency_iter data = {};

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &data);

	return data.result_per_band[band];
}

struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;
	bool error;
};

static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_bss_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return;

	if (data->vif) {
		data->error = true;
		return;
	}

	data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
{
	struct iwl_bss_iter_data bss_iter_data = {};

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bss_iface_iterator, &bss_iter_data);

	if (bss_iter_data.error) {
		IWL_ERR(mvm, "More than one managed interface active!\n");
		return ERR_PTR(-EINVAL);
	}

	return bss_iter_data.vif;
}

struct iwl_bss_find_iter_data {
	struct ieee80211_vif *vif;
	u32 macid;
};

static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_bss_find_iter_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->id == data->macid)
		data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
{
	struct iwl_bss_find_iter_data data = {
		.macid = macid,
	};

	lockdep_assert_held(&mvm->mutex);

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bss_find_iface_iterator, &data);

	return data.vif;
}

struct iwl_sta_iter_data {
	bool assoc;
};

static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_sta_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (vif->cfg.assoc)
		data->assoc = true;
}

bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
{
	struct iwl_sta_iter_data data = {
		.assoc = false,
	};

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_sta_iface_iterator,
						   &data);
	return data.assoc;
}

unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    bool tdls, bool cmd_q)
{
	struct iwl_fw_dbg_trigger_tlv *trigger;
	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
	unsigned int default_timeout = cmd_q ?
		IWL_DEF_WD_TIMEOUT :
		mvm->trans->trans_cfg->base_params->wd_timeout;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
		/*
		 * We can't know when the station is asleep or awake, so we
		 * must disable the queue hang detection.
		 */
		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
		    vif && vif->type == NL80211_IFTYPE_AP)
			return IWL_WATCHDOG_DISABLED;
		return default_timeout;
	}

	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
	txq_timer = (void *)trigger->data;

	if (tdls)
		return le32_to_cpu(txq_timer->tdls);

	if (cmd_q)
		return le32_to_cpu(txq_timer->command_queue);

	if (WARN_ON(!vif))
		return default_timeout;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		return le32_to_cpu(txq_timer->ibss);
	case NL80211_IFTYPE_STATION:
		return le32_to_cpu(txq_timer->bss);
	case NL80211_IFTYPE_AP:
		return le32_to_cpu(txq_timer->softap);
	case NL80211_IFTYPE_P2P_CLIENT:
		return le32_to_cpu(txq_timer->p2p_client);
	case NL80211_IFTYPE_P2P_GO:
		return le32_to_cpu(txq_timer->p2p_go);
	case NL80211_IFTYPE_P2P_DEVICE:
		return le32_to_cpu(txq_timer->p2p_device);
	case NL80211_IFTYPE_MONITOR:
		return default_timeout;
	default:
		WARN_ON(1);
		return mvm->trans->trans_cfg->base_params->wd_timeout;
	}
}

void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     const char *errmsg)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_MLME);
	if (!trig)
		goto out;

	trig_mlme = (void *)trig->data;

	if (trig_mlme->stop_connection_loss &&
	    --trig_mlme->stop_connection_loss)
		goto out;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);

out:
	ieee80211_connection_loss(vif);
}

void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  const struct ieee80211_sta *sta,
					  u16 tid)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"Frame from %pM timed out, tid %d",
				sta->addr, tid);
}

u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
{
	if (!elapsed)
		return 0;

	return (100 * airtime / elapsed) / USEC_PER_MSEC;
}
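
/*
 * Worked example: airtime is in usec and elapsed in msec, so with
 * airtime = 250000 usec over elapsed = 1000 msec this evaluates to
 * (100 * 250000 / 1000) / USEC_PER_MSEC == 25, i.e. a 25% load.
 */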

static enum iwl_mvm_traffic_load
iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
{
	u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);

	if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
		return IWL_MVM_TRAFFIC_HIGH;
	if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
		return IWL_MVM_TRAFFIC_MEDIUM;

	return IWL_MVM_TRAFFIC_LOW;
}

static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;

	if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
		return;

	low_latency = mvm->tcm.result.low_latency[mvmvif->id];

	if (!mvm->tcm.result.change[mvmvif->id] &&
	    prev == low_latency) {
		iwl_mvm_update_quotas(mvm, false, NULL);
		return;
	}

	if (prev != low_latency) {
		/* this sends traffic load and updates quota as well */
		iwl_mvm_update_low_latency(mvm, vif, low_latency,
					   LOW_LATENCY_TRAFFIC);
	} else {
		iwl_mvm_update_quotas(mvm, false, NULL);
	}
}

static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
	mutex_lock(&mvm->mutex);

	ieee80211_iterate_active_interfaces(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_tcm_iter, mvm);

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		iwl_mvm_config_scan(mvm);

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm;
	struct iwl_mvm_vif *mvmvif;
	struct ieee80211_vif *vif;

	mvmvif = container_of(wk, struct iwl_mvm_vif,
			      uapsd_nonagg_detected_wk.work);
	vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
	mvm = mvmvif->mvm;

	if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
		return;

	/* remember that this AP is broken */
	memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
	       vif->bss_conf.bssid, ETH_ALEN);
	mvm->uapsd_noagg_bssid_write_idx++;
	if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
		mvm->uapsd_noagg_bssid_write_idx = 0;

	iwl_mvm_connection_loss(mvm, vif,
				"AP isn't using AMPDU with uAPSD enabled");
}

static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!vif->cfg.assoc)
		return;

	if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd &&
	    !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd &&
	    !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd &&
	    !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd)
		return;

	if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
		return;

	mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
	IWL_INFO(mvm,
		 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
	schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk,
			      15 * HZ);
}

static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
						 unsigned int elapsed,
						 int mac)
{
	u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
	u64 tpt;
	unsigned long rate;
	struct ieee80211_vif *vif;

	rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);

	if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
	    mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
		return;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		tpt = 8 * bytes; /* kbps */
		do_div(tpt, elapsed);
		rate *= 1000; /* kbps */
		if (tpt < 22 * rate / 100)
			return;
	} else {
		/*
		 * the rate here is actually the threshold, in 100Kbps units,
		 * so do the needed conversion from bytes to 100Kbps:
		 * 100kb = bits / (100 * 1000),
		 * 100kbps = 100kb / (msecs / 1000) ==
		 *           (bits / (100 * 1000)) / (msecs / 1000) ==
		 *           bits / (100 * msecs)
		 */
		tpt = (8 * bytes);
		do_div(tpt, elapsed * 100);
		if (tpt < rate)
			return;
	}

	rcu_read_lock();
	vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
	if (vif)
		iwl_mvm_uapsd_agg_disconnect(mvm, vif);
	rcu_read_unlock();
}
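
/*
 * Worked example for the non-new-RX-API branch above: with
 * bytes = 1250000 and elapsed = 1000 msec,
 * tpt = (8 * 1250000) / (1000 * 100) = 100, i.e. 10 Mbps expressed in
 * 100 kbps units, which is then compared against the rate threshold.
 */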

static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
				 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 *band = _data;

	if (!mvmvif->deflink.phy_ctxt)
		return;

	band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band;
}

static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
					    unsigned long ts,
					    bool handle_uapsd)
{
	unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
	unsigned int uapsd_elapsed =
		jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
	u32 total_airtime = 0;
	u32 band_airtime[NUM_NL80211_BANDS] = {0};
	u32 band[NUM_MAC_INDEX_DRIVER] = {0};
	int ac, mac, i;
	bool low_latency = false;
	enum iwl_mvm_traffic_load load, band_load;
	bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);

	if (handle_ll)
		mvm->tcm.ll_ts = ts;
	if (handle_uapsd)
		mvm->tcm.uapsd_nonagg_ts = ts;

	mvm->tcm.result.elapsed = elapsed;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_tcm_iterator,
						   &band);

	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
		u32 vo_vi_pkts = 0;
		u32 airtime = mdata->rx.airtime + mdata->tx.airtime;

		total_airtime += airtime;
		band_airtime[band[mac]] += airtime;

		load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
		mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
		mvm->tcm.result.load[mac] = load;
		mvm->tcm.result.airtime[mac] = airtime;

		for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
			vo_vi_pkts += mdata->rx.pkts[ac] +
				      mdata->tx.pkts[ac];

		/* enable immediately with enough packets but defer disabling */
		if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
			mvm->tcm.result.low_latency[mac] = true;
		else if (handle_ll)
			mvm->tcm.result.low_latency[mac] = false;

		if (handle_ll) {
			/* clear old data */
			memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
			memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
		}
		low_latency |= mvm->tcm.result.low_latency[mac];

		if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
			iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
							     mac);
		/* clear old data */
		if (handle_uapsd)
			mdata->uapsd_nonagg_detect.rx_bytes = 0;
		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
	}

	load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
	mvm->tcm.result.global_load = load;

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
		mvm->tcm.result.band_load[i] = band_load;
	}

	/*
	 * If the current load isn't low we need to force re-evaluation
	 * in the TCM period, so that we can return to low load if there
	 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
	 * triggered by traffic).
	 */
	if (load != IWL_MVM_TRAFFIC_LOW)
		return MVM_TCM_PERIOD;
	/*
	 * If low-latency is active we need to force re-evaluation after
	 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
	 * when there's no traffic at all.
	 */
	if (low_latency)
		return MVM_LL_PERIOD;
	/*
	 * Otherwise, we don't need to run the work struct because we're
	 * in the default "idle" state - traffic indication is low (which
	 * also covers the "no traffic" case) and low-latency is disabled
	 * so there's no state that may need to be disabled when there's
	 * no traffic at all.
	 *
	 * Note that this has no impact on the regular scheduling of the
	 * updates triggered by traffic - those happen whenever one of the
	 * two timeouts expire (if there's traffic at all.)
	 */
	return 0;
}

void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
{
	unsigned long ts = jiffies;
	bool handle_uapsd =
		time_after(ts, mvm->tcm.uapsd_nonagg_ts +
			       msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));

	spin_lock(&mvm->tcm.lock);
	if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
		spin_unlock(&mvm->tcm.lock);
		return;
	}
	spin_unlock(&mvm->tcm.lock);

	if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
		mutex_lock(&mvm->mutex);
		if (iwl_mvm_request_statistics(mvm, true))
			handle_uapsd = false;
		mutex_unlock(&mvm->mutex);
	}

	spin_lock(&mvm->tcm.lock);
	/* re-check if somebody else won the recheck race */
	if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
		/* calculate statistics */
		unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
								  handle_uapsd);

		/* the memset needs to be visible before the timestamp */
		smp_mb();
		mvm->tcm.ts = ts;
		if (work_delay)
			schedule_delayed_work(&mvm->tcm.work, work_delay);
	}
	spin_unlock(&mvm->tcm.lock);

	iwl_mvm_tcm_results(mvm);
}

void iwl_mvm_tcm_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
					   tcm.work);

	iwl_mvm_recalc_tcm(mvm);
}

void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
{
	spin_lock_bh(&mvm->tcm.lock);
	mvm->tcm.paused = true;
	spin_unlock_bh(&mvm->tcm.lock);
	if (with_cancel)
		cancel_delayed_work_sync(&mvm->tcm.work);
}

void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
{
	int mac;
	bool low_latency = false;

	spin_lock_bh(&mvm->tcm.lock);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

		memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
		memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));

		if (mvm->tcm.result.low_latency[mac])
			low_latency = true;
	}
	/* The TCM data needs to be reset before "paused" flag changes */
	smp_mb();
	mvm->tcm.paused = false;

	/*
	 * if the current load is not low or low latency is active, force
	 * re-evaluation to cover the case of no traffic.
	 */
	if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
		schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
	else if (low_latency)
		schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);

	spin_unlock_bh(&mvm->tcm.lock);
}

void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
			  iwl_mvm_tcm_uapsd_nonagg_detected_wk);
}

void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
}

u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
{
	u32 reg_addr = DEVICE_SYSTEM_TIME_REG;

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
	    mvm->trans->cfg->gp2_reg_addr)
		reg_addr = mvm->trans->cfg->gp2_reg_addr;

	return iwl_read_prph(mvm->trans, reg_addr);
}

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
			   u32 *gp2, u64 *boottime, ktime_t *realtime)
{
	bool ps_disabled;

	lockdep_assert_held(&mvm->mutex);

	/* Disable power save when reading GP2 */
	ps_disabled = mvm->ps_disabled;
	if (!ps_disabled) {
		mvm->ps_disabled = true;
		iwl_mvm_power_update_device(mvm);
	}

	*gp2 = iwl_mvm_get_systime(mvm);

	if (clock_type == CLOCK_BOOTTIME && boottime)
		*boottime = ktime_get_boottime_ns();
	else if (clock_type == CLOCK_REALTIME && realtime)
		*realtime = ktime_get_real();

	if (!ps_disabled) {
		mvm->ps_disabled = ps_disabled;
		iwl_mvm_power_update_device(mvm);
	}
}

/* Find out if at least two links from different vifs use the same channel
 * FIXME: consider having a refcount array in struct iwl_mvm_vif for
 * used phy_ctxt ids.
 */
bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1,
				     struct iwl_mvm_vif *vif2)
{
	unsigned int i, j;

	for_each_mvm_vif_valid_link(vif1, i) {
		for_each_mvm_vif_valid_link(vif2, j) {
			if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt)
				return true;
		}
	}

	return false;
}

bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif)
{
	unsigned int i;

	/* FIXME: can it fail when phy_ctxt is assigned? */
	for_each_mvm_vif_valid_link(mvmvif, i) {
		if (mvmvif->link[i]->phy_ctxt &&
		    mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX)
			return true;
	}

	return false;
}