   1/*
   2 * Copyright (c) 2005-2011 Atheros Communications Inc.
   3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
   4 *
   5 * Permission to use, copy, modify, and/or distribute this software for any
   6 * purpose with or without fee is hereby granted, provided that the above
   7 * copyright notice and this permission notice appear in all copies.
   8 *
   9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16 */
  17
  18#include "mac.h"
  19
  20#include <net/mac80211.h>
  21#include <linux/etherdevice.h>
  22#include <linux/acpi.h>
  23
  24#include "hif.h"
  25#include "core.h"
  26#include "debug.h"
  27#include "wmi.h"
  28#include "htt.h"
  29#include "txrx.h"
  30#include "testmode.h"
  31#include "wmi.h"
  32#include "wmi-tlv.h"
  33#include "wmi-ops.h"
  34#include "wow.h"
  35
  36/*********/
  37/* Rates */
  38/*********/
  39
  40static struct ieee80211_rate ath10k_rates[] = {
  41	{ .bitrate = 10,
  42	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
  43	{ .bitrate = 20,
  44	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
  45	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
  46	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
  47	{ .bitrate = 55,
  48	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
  49	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
  50	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
  51	{ .bitrate = 110,
  52	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
  53	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
  54	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
  55
  56	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
  57	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
  58	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
  59	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
  60	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
  61	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
  62	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
  63	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
  64};
  65
  66static struct ieee80211_rate ath10k_rates_rev2[] = {
  67	{ .bitrate = 10,
  68	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
  69	{ .bitrate = 20,
  70	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
  71	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
  72	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
  73	{ .bitrate = 55,
  74	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
  75	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
  76	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
  77	{ .bitrate = 110,
  78	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
  79	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
  80	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
  81
  82	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
  83	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
  84	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
  85	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
  86	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
  87	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
  88	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
  89	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
  90};
  91
  92#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
  93
  94#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
  95#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
  96			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
  97#define ath10k_g_rates (ath10k_rates + 0)
  98#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
  99
 100#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
 101#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
 102
 103static bool ath10k_mac_bitrate_is_cck(int bitrate)
 104{
 105	switch (bitrate) {
 106	case 10:
 107	case 20:
 108	case 55:
 109	case 110:
 110		return true;
 111	}
 112
 113	return false;
 114}
 115
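/* Convert a bitrate given in 100 kbps units into a rate code expressed in
 * 500 kbps units, with bit 7 set to mark CCK rates.
 */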
 116static u8 ath10k_mac_bitrate_to_rate(int bitrate)
 117{
 118	return DIV_ROUND_UP(bitrate, 5) |
 119	       (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
 120}
 121
 122u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
 123			     u8 hw_rate, bool cck)
 124{
 125	const struct ieee80211_rate *rate;
 126	int i;
 127
 128	for (i = 0; i < sband->n_bitrates; i++) {
 129		rate = &sband->bitrates[i];
 130
 131		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
 132			continue;
 133
 134		if (rate->hw_value == hw_rate)
 135			return i;
 136		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
 137			 rate->hw_value_short == hw_rate)
 138			return i;
 139	}
 140
 141	return 0;
 142}
 143
 144u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
 145			     u32 bitrate)
 146{
 147	int i;
 148
 149	for (i = 0; i < sband->n_bitrates; i++)
 150		if (sband->bitrates[i].bitrate == bitrate)
 151			return i;
 152
 153	return 0;
 154}
 155
 156static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
 157{
 158	switch ((mcs_map >> (2 * nss)) & 0x3) {
 159	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
 160	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
 161	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
 162	}
 163	return 0;
 164}
 165
 166static u32
 167ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
 168{
 169	int nss;
 170
 171	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
 172		if (ht_mcs_mask[nss])
 173			return nss + 1;
 174
 175	return 1;
 176}
 177
 178static u32
 179ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
 180{
 181	int nss;
 182
 183	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
 184		if (vht_mcs_mask[nss])
 185			return nss + 1;
 186
 187	return 1;
 188}
 189
 190int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
 191{
 192	enum wmi_host_platform_type platform_type;
 193	int ret;
 194
 195	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
 196		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
 197	else
 198		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
 199
 200	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
 201
 202	if (ret && ret != -EOPNOTSUPP) {
 203		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
 204		return ret;
 205	}
 206
 207	return 0;
 208}
 209
 210/**********/
 211/* Crypto */
 212/**********/
 213
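/* Translate a mac80211 key configuration into a WMI vdev install-key command.
 * DISABLE_KEY is expressed as installing a WMI_CIPHER_NONE key with no key
 * data.
 */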
 214static int ath10k_send_key(struct ath10k_vif *arvif,
 215			   struct ieee80211_key_conf *key,
 216			   enum set_key_cmd cmd,
 217			   const u8 *macaddr, u32 flags)
 218{
 219	struct ath10k *ar = arvif->ar;
 220	struct wmi_vdev_install_key_arg arg = {
 221		.vdev_id = arvif->vdev_id,
 222		.key_idx = key->keyidx,
 223		.key_len = key->keylen,
 224		.key_data = key->key,
 225		.key_flags = flags,
 226		.macaddr = macaddr,
 227	};
 228
 229	lockdep_assert_held(&arvif->ar->conf_mutex);
 230
 231	switch (key->cipher) {
 232	case WLAN_CIPHER_SUITE_CCMP:
 233		arg.key_cipher = WMI_CIPHER_AES_CCM;
 234		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
 235		break;
 236	case WLAN_CIPHER_SUITE_TKIP:
 237		arg.key_cipher = WMI_CIPHER_TKIP;
 238		arg.key_txmic_len = 8;
 239		arg.key_rxmic_len = 8;
 240		break;
 241	case WLAN_CIPHER_SUITE_WEP40:
 242	case WLAN_CIPHER_SUITE_WEP104:
 243		arg.key_cipher = WMI_CIPHER_WEP;
 244		break;
 245	case WLAN_CIPHER_SUITE_AES_CMAC:
 246		WARN_ON(1);
 247		return -EINVAL;
 248	default:
 249		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
 250		return -EOPNOTSUPP;
 251	}
 252
 253	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
 254		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
 255
 256	if (cmd == DISABLE_KEY) {
 257		arg.key_cipher = WMI_CIPHER_NONE;
 258		arg.key_data = NULL;
 259	}
 260
 261	return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
 262}
 263
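/* Push the key to firmware and wait for the install-key completion event.
 * Returns 1 without contacting firmware when hardware crypto is disabled for
 * this vif (nohwcrypt).
 */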
 264static int ath10k_install_key(struct ath10k_vif *arvif,
 265			      struct ieee80211_key_conf *key,
 266			      enum set_key_cmd cmd,
 267			      const u8 *macaddr, u32 flags)
 268{
 269	struct ath10k *ar = arvif->ar;
 270	int ret;
 271	unsigned long time_left;
 272
 273	lockdep_assert_held(&ar->conf_mutex);
 274
 275	reinit_completion(&ar->install_key_done);
 276
 277	if (arvif->nohwcrypt)
 278		return 1;
 279
 280	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
 281	if (ret)
 282		return ret;
 283
 284	time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
 285	if (time_left == 0)
 286		return -ETIMEDOUT;
 287
 288	return 0;
 289}
 290
 291static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
 292					const u8 *addr)
 293{
 294	struct ath10k *ar = arvif->ar;
 295	struct ath10k_peer *peer;
 296	int ret;
 297	int i;
 298	u32 flags;
 299
 300	lockdep_assert_held(&ar->conf_mutex);
 301
 302	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
 303		    arvif->vif->type != NL80211_IFTYPE_ADHOC &&
 304		    arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
 305		return -EINVAL;
 306
 307	spin_lock_bh(&ar->data_lock);
 308	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
 309	spin_unlock_bh(&ar->data_lock);
 310
 311	if (!peer)
 312		return -ENOENT;
 313
 314	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
 315		if (arvif->wep_keys[i] == NULL)
 316			continue;
 317
 318		switch (arvif->vif->type) {
 319		case NL80211_IFTYPE_AP:
 320			flags = WMI_KEY_PAIRWISE;
 321
 322			if (arvif->def_wep_key_idx == i)
 323				flags |= WMI_KEY_TX_USAGE;
 324
 325			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
 326						 SET_KEY, addr, flags);
 327			if (ret < 0)
 328				return ret;
 329			break;
 330		case NL80211_IFTYPE_ADHOC:
 331			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
 332						 SET_KEY, addr,
 333						 WMI_KEY_PAIRWISE);
 334			if (ret < 0)
 335				return ret;
 336
 337			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
 338						 SET_KEY, addr, WMI_KEY_GROUP);
 339			if (ret < 0)
 340				return ret;
 341			break;
 342		default:
 343			WARN_ON(1);
 344			return -EINVAL;
 345		}
 346
 347		spin_lock_bh(&ar->data_lock);
 348		peer->keys[i] = arvif->wep_keys[i];
 349		spin_unlock_bh(&ar->data_lock);
 350	}
 351
 352	/* In some cases (notably with static WEP IBSS with multiple keys)
 353	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
 354	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
  355	 * didn't seem to help. Using def_keyid vdev parameter seems to be
 356	 * effective so use that.
 357	 *
 358	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
 359	 */
 360	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
 361		return 0;
 362
 363	if (arvif->def_wep_key_idx == -1)
 364		return 0;
 365
 366	ret = ath10k_wmi_vdev_set_param(arvif->ar,
 367					arvif->vdev_id,
 368					arvif->ar->wmi.vdev_param->def_keyid,
 369					arvif->def_wep_key_idx);
 370	if (ret) {
  371		ath10k_warn(ar, "failed to re-set def wep key idx on vdev %i: %d\n",
 372			    arvif->vdev_id, ret);
 373		return ret;
 374	}
 375
 376	return 0;
 377}
 378
 379static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
 380				  const u8 *addr)
 381{
 382	struct ath10k *ar = arvif->ar;
 383	struct ath10k_peer *peer;
 384	int first_errno = 0;
 385	int ret;
 386	int i;
 387	u32 flags = 0;
 388
 389	lockdep_assert_held(&ar->conf_mutex);
 390
 391	spin_lock_bh(&ar->data_lock);
 392	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
 393	spin_unlock_bh(&ar->data_lock);
 394
 395	if (!peer)
 396		return -ENOENT;
 397
 398	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
 399		if (peer->keys[i] == NULL)
 400			continue;
 401
 402		/* key flags are not required to delete the key */
 403		ret = ath10k_install_key(arvif, peer->keys[i],
 404					 DISABLE_KEY, addr, flags);
 405		if (ret < 0 && first_errno == 0)
 406			first_errno = ret;
 407
 408		if (ret < 0)
 409			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
 410				    i, ret);
 411
 412		spin_lock_bh(&ar->data_lock);
 413		peer->keys[i] = NULL;
 414		spin_unlock_bh(&ar->data_lock);
 415	}
 416
 417	return first_errno;
 418}
 419
 420bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
 421				    u8 keyidx)
 422{
 423	struct ath10k_peer *peer;
 424	int i;
 425
 426	lockdep_assert_held(&ar->data_lock);
 427
 428	/* We don't know which vdev this peer belongs to,
 429	 * since WMI doesn't give us that information.
 430	 *
 431	 * FIXME: multi-bss needs to be handled.
 432	 */
 433	peer = ath10k_peer_find(ar, 0, addr);
 434	if (!peer)
 435		return false;
 436
 437	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
 438		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
 439			return true;
 440	}
 441
 442	return false;
 443}
 444
 445static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
 446				 struct ieee80211_key_conf *key)
 447{
 448	struct ath10k *ar = arvif->ar;
 449	struct ath10k_peer *peer;
 450	u8 addr[ETH_ALEN];
 451	int first_errno = 0;
 452	int ret;
 453	int i;
 454	u32 flags = 0;
 455
 456	lockdep_assert_held(&ar->conf_mutex);
 457
 458	for (;;) {
  459		/* since ath10k_install_key() can sleep, we can't hold data_lock
  460		 * all the time, so we try to remove the keys incrementally */
 461		spin_lock_bh(&ar->data_lock);
 462		i = 0;
 463		list_for_each_entry(peer, &ar->peers, list) {
 464			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
 465				if (peer->keys[i] == key) {
 466					ether_addr_copy(addr, peer->addr);
 467					peer->keys[i] = NULL;
 468					break;
 469				}
 470			}
 471
 472			if (i < ARRAY_SIZE(peer->keys))
 473				break;
 474		}
 475		spin_unlock_bh(&ar->data_lock);
 476
 477		if (i == ARRAY_SIZE(peer->keys))
 478			break;
 479		/* key flags are not required to delete the key */
 480		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
 481		if (ret < 0 && first_errno == 0)
 482			first_errno = ret;
 483
 484		if (ret)
 485			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
 486				    addr, ret);
 487	}
 488
 489	return first_errno;
 490}
 491
 492static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
 493					 struct ieee80211_key_conf *key)
 494{
 495	struct ath10k *ar = arvif->ar;
 496	struct ath10k_peer *peer;
 497	int ret;
 498
 499	lockdep_assert_held(&ar->conf_mutex);
 500
 501	list_for_each_entry(peer, &ar->peers, list) {
 502		if (ether_addr_equal(peer->addr, arvif->vif->addr))
 503			continue;
 504
 505		if (ether_addr_equal(peer->addr, arvif->bssid))
 506			continue;
 507
 508		if (peer->keys[key->keyidx] == key)
 509			continue;
 510
  511		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i wep key %i needs update\n",
 512			   arvif->vdev_id, key->keyidx);
 513
 514		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
 515		if (ret) {
 516			ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
 517				    arvif->vdev_id, peer->addr, ret);
 518			return ret;
 519		}
 520	}
 521
 522	return 0;
 523}
 524
 525/*********************/
 526/* General utilities */
 527/*********************/
 528
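/* Map a cfg80211 channel definition (band and width) to the corresponding WMI
 * phy mode. Unsupported combinations fall back to MODE_UNKNOWN and trigger a
 * warning.
 */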
 529static inline enum wmi_phy_mode
 530chan_to_phymode(const struct cfg80211_chan_def *chandef)
 531{
 532	enum wmi_phy_mode phymode = MODE_UNKNOWN;
 533
 534	switch (chandef->chan->band) {
 535	case NL80211_BAND_2GHZ:
 536		switch (chandef->width) {
 537		case NL80211_CHAN_WIDTH_20_NOHT:
 538			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
 539				phymode = MODE_11B;
 540			else
 541				phymode = MODE_11G;
 542			break;
 543		case NL80211_CHAN_WIDTH_20:
 544			phymode = MODE_11NG_HT20;
 545			break;
 546		case NL80211_CHAN_WIDTH_40:
 547			phymode = MODE_11NG_HT40;
 548			break;
 549		case NL80211_CHAN_WIDTH_5:
 550		case NL80211_CHAN_WIDTH_10:
 551		case NL80211_CHAN_WIDTH_80:
 552		case NL80211_CHAN_WIDTH_80P80:
 553		case NL80211_CHAN_WIDTH_160:
 554			phymode = MODE_UNKNOWN;
 555			break;
 556		}
 557		break;
 558	case NL80211_BAND_5GHZ:
 559		switch (chandef->width) {
 560		case NL80211_CHAN_WIDTH_20_NOHT:
 561			phymode = MODE_11A;
 562			break;
 563		case NL80211_CHAN_WIDTH_20:
 564			phymode = MODE_11NA_HT20;
 565			break;
 566		case NL80211_CHAN_WIDTH_40:
 567			phymode = MODE_11NA_HT40;
 568			break;
 569		case NL80211_CHAN_WIDTH_80:
 570			phymode = MODE_11AC_VHT80;
 571			break;
 572		case NL80211_CHAN_WIDTH_5:
 573		case NL80211_CHAN_WIDTH_10:
 574		case NL80211_CHAN_WIDTH_80P80:
 575		case NL80211_CHAN_WIDTH_160:
 576			phymode = MODE_UNKNOWN;
 577			break;
 578		}
 579		break;
 580	default:
 581		break;
 582	}
 583
 584	WARN_ON(phymode == MODE_UNKNOWN);
 585	return phymode;
 586}
 587
 588static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
 589{
 590/*
 591 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
 592 *   0 for no restriction
 593 *   1 for 1/4 us
 594 *   2 for 1/2 us
 595 *   3 for 1 us
 596 *   4 for 2 us
 597 *   5 for 4 us
 598 *   6 for 8 us
 599 *   7 for 16 us
 600 */
 601	switch (mpdudensity) {
 602	case 0:
 603		return 0;
 604	case 1:
 605	case 2:
 606	case 3:
 607	/* Our lower layer calculations limit our precision to
 608	   1 microsecond */
 609		return 1;
 610	case 4:
 611		return 2;
 612	case 5:
 613		return 4;
 614	case 6:
 615		return 8;
 616	case 7:
 617		return 16;
 618	default:
 619		return 0;
 620	}
 621}
 622
 623int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
 624			struct cfg80211_chan_def *def)
 625{
 626	struct ieee80211_chanctx_conf *conf;
 627
 628	rcu_read_lock();
 629	conf = rcu_dereference(vif->chanctx_conf);
 630	if (!conf) {
 631		rcu_read_unlock();
 632		return -ENOENT;
 633	}
 634
 635	*def = conf->def;
 636	rcu_read_unlock();
 637
 638	return 0;
 639}
 640
 641static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
 642					 struct ieee80211_chanctx_conf *conf,
 643					 void *data)
 644{
 645	int *num = data;
 646
 647	(*num)++;
 648}
 649
 650static int ath10k_mac_num_chanctxs(struct ath10k *ar)
 651{
 652	int num = 0;
 653
 654	ieee80211_iter_chan_contexts_atomic(ar->hw,
 655					    ath10k_mac_num_chanctxs_iter,
 656					    &num);
 657
 658	return num;
 659}
 660
 661static void
 662ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
 663				struct ieee80211_chanctx_conf *conf,
 664				void *data)
 665{
 666	struct cfg80211_chan_def **def = data;
 667
 668	*def = &conf->def;
 669}
 670
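/* Create a firmware peer entry after checking the peer limit (each vdev also
 * consumes a peer entry) and wait until firmware confirms the creation.
 */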
 671static int ath10k_peer_create(struct ath10k *ar,
 672			      struct ieee80211_vif *vif,
 673			      struct ieee80211_sta *sta,
 674			      u32 vdev_id,
 675			      const u8 *addr,
 676			      enum wmi_peer_type peer_type)
 677{
 678	struct ath10k_vif *arvif;
 679	struct ath10k_peer *peer;
 680	int num_peers = 0;
 681	int ret;
 682
 683	lockdep_assert_held(&ar->conf_mutex);
 684
 685	num_peers = ar->num_peers;
 686
 687	/* Each vdev consumes a peer entry as well */
 688	list_for_each_entry(arvif, &ar->arvifs, list)
 689		num_peers++;
 690
 691	if (num_peers >= ar->max_num_peers)
 692		return -ENOBUFS;
 693
 694	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
 695	if (ret) {
 696		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
 697			    addr, vdev_id, ret);
 698		return ret;
 699	}
 700
 701	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
 702	if (ret) {
 703		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
 704			    addr, vdev_id, ret);
 705		return ret;
 706	}
 707
 708	spin_lock_bh(&ar->data_lock);
 709
 710	peer = ath10k_peer_find(ar, vdev_id, addr);
 711	if (!peer) {
 712		spin_unlock_bh(&ar->data_lock);
 713		ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
 714			    addr, vdev_id);
 715		ath10k_wmi_peer_delete(ar, vdev_id, addr);
 716		return -ENOENT;
 717	}
 718
 719	peer->vif = vif;
 720	peer->sta = sta;
 721
 722	spin_unlock_bh(&ar->data_lock);
 723
 724	ar->num_peers++;
 725
 726	return 0;
 727}
 728
 729static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
 730{
 731	struct ath10k *ar = arvif->ar;
 732	u32 param;
 733	int ret;
 734
 735	param = ar->wmi.pdev_param->sta_kickout_th;
 736	ret = ath10k_wmi_pdev_set_param(ar, param,
 737					ATH10K_KICKOUT_THRESHOLD);
 738	if (ret) {
 739		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
 740			    arvif->vdev_id, ret);
 741		return ret;
 742	}
 743
 744	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
 745	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
 746					ATH10K_KEEPALIVE_MIN_IDLE);
 747	if (ret) {
 748		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
 749			    arvif->vdev_id, ret);
 750		return ret;
 751	}
 752
 753	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
 754	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
 755					ATH10K_KEEPALIVE_MAX_IDLE);
 756	if (ret) {
 757		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
 758			    arvif->vdev_id, ret);
 759		return ret;
 760	}
 761
 762	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
 763	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
 764					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
 765	if (ret) {
 766		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
 767			    arvif->vdev_id, ret);
 768		return ret;
 769	}
 770
 771	return 0;
 772}
 773
 774static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
 775{
 776	struct ath10k *ar = arvif->ar;
 777	u32 vdev_param;
 778
 779	vdev_param = ar->wmi.vdev_param->rts_threshold;
 780	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 781}
 782
 783static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 784{
 785	int ret;
 786
 787	lockdep_assert_held(&ar->conf_mutex);
 788
 789	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
 790	if (ret)
 791		return ret;
 792
 793	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
 794	if (ret)
 795		return ret;
 796
 797	ar->num_peers--;
 798
 799	return 0;
 800}
 801
 802static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
 803{
 804	struct ath10k_peer *peer, *tmp;
 805	int peer_id;
 806	int i;
 807
 808	lockdep_assert_held(&ar->conf_mutex);
 809
 810	spin_lock_bh(&ar->data_lock);
 811	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
 812		if (peer->vdev_id != vdev_id)
 813			continue;
 814
 815		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
 816			    peer->addr, vdev_id);
 817
 818		for_each_set_bit(peer_id, peer->peer_ids,
 819				 ATH10K_MAX_NUM_PEER_IDS) {
 820			ar->peer_map[peer_id] = NULL;
 821		}
 822
 823		/* Double check that peer is properly un-referenced from
 824		 * the peer_map
 825		 */
 826		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
 827			if (ar->peer_map[i] == peer) {
 828				ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
 829					    peer->addr, peer, i);
 830				ar->peer_map[i] = NULL;
 831			}
 832		}
 833
 834		list_del(&peer->list);
 835		kfree(peer);
 836		ar->num_peers--;
 837	}
 838	spin_unlock_bh(&ar->data_lock);
 839}
 840
 841static void ath10k_peer_cleanup_all(struct ath10k *ar)
 842{
 843	struct ath10k_peer *peer, *tmp;
 844	int i;
 845
 846	lockdep_assert_held(&ar->conf_mutex);
 847
 848	spin_lock_bh(&ar->data_lock);
 849	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
 850		list_del(&peer->list);
 851		kfree(peer);
 852	}
 853
 854	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
 855		ar->peer_map[i] = NULL;
 856
 857	spin_unlock_bh(&ar->data_lock);
 858
 859	ar->num_peers = 0;
 860	ar->num_stations = 0;
 861}
 862
 863static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
 864				       struct ieee80211_sta *sta,
 865				       enum wmi_tdls_peer_state state)
 866{
 867	int ret;
 868	struct wmi_tdls_peer_update_cmd_arg arg = {};
 869	struct wmi_tdls_peer_capab_arg cap = {};
 870	struct wmi_channel_arg chan_arg = {};
 871
 872	lockdep_assert_held(&ar->conf_mutex);
 873
 874	arg.vdev_id = vdev_id;
 875	arg.peer_state = state;
 876	ether_addr_copy(arg.addr, sta->addr);
 877
 878	cap.peer_max_sp = sta->max_sp;
 879	cap.peer_uapsd_queues = sta->uapsd_queues;
 880
 881	if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
 882	    !sta->tdls_initiator)
 883		cap.is_peer_responder = 1;
 884
 885	ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
 886	if (ret) {
 887		ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
 888			    arg.addr, vdev_id, ret);
 889		return ret;
 890	}
 891
 892	return 0;
 893}
 894
 895/************************/
 896/* Interface management */
 897/************************/
 898
 899void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
 900{
 901	struct ath10k *ar = arvif->ar;
 902
 903	lockdep_assert_held(&ar->data_lock);
 904
 905	if (!arvif->beacon)
 906		return;
 907
 908	if (!arvif->beacon_buf)
 909		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
 910				 arvif->beacon->len, DMA_TO_DEVICE);
 911
 912	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
 913		    arvif->beacon_state != ATH10K_BEACON_SENT))
 914		return;
 915
 916	dev_kfree_skb_any(arvif->beacon);
 917
 918	arvif->beacon = NULL;
 919	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
 920}
 921
 922static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
 923{
 924	struct ath10k *ar = arvif->ar;
 925
 926	lockdep_assert_held(&ar->data_lock);
 927
 928	ath10k_mac_vif_beacon_free(arvif);
 929
 930	if (arvif->beacon_buf) {
 931		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
 932				  arvif->beacon_buf, arvif->beacon_paddr);
 933		arvif->beacon_buf = NULL;
 934	}
 935}
 936
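/* Wait for the firmware's vdev start/stop response (vdev_setup_done), bailing
 * out early if the device has crashed.
 */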
 937static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
 938{
 939	unsigned long time_left;
 940
 941	lockdep_assert_held(&ar->conf_mutex);
 942
 943	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
 944		return -ESHUTDOWN;
 945
 946	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
 947						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
 948	if (time_left == 0)
 949		return -ETIMEDOUT;
 950
 951	return 0;
 952}
 953
 954static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 955{
 956	struct cfg80211_chan_def *chandef = NULL;
 957	struct ieee80211_channel *channel = NULL;
 958	struct wmi_vdev_start_request_arg arg = {};
 959	int ret = 0;
 960
 961	lockdep_assert_held(&ar->conf_mutex);
 962
 963	ieee80211_iter_chan_contexts_atomic(ar->hw,
 964					    ath10k_mac_get_any_chandef_iter,
 965					    &chandef);
 966	if (WARN_ON_ONCE(!chandef))
 967		return -ENOENT;
 968
 969	channel = chandef->chan;
 970
 971	arg.vdev_id = vdev_id;
 972	arg.channel.freq = channel->center_freq;
 973	arg.channel.band_center_freq1 = chandef->center_freq1;
 974
  975	/* TODO: set this up dynamically, what if we
  976	   don't have any vifs? */
 977	arg.channel.mode = chan_to_phymode(chandef);
 978	arg.channel.chan_radar =
 979			!!(channel->flags & IEEE80211_CHAN_RADAR);
 980
 981	arg.channel.min_power = 0;
 982	arg.channel.max_power = channel->max_power * 2;
 983	arg.channel.max_reg_power = channel->max_reg_power * 2;
 984	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
 985
 986	reinit_completion(&ar->vdev_setup_done);
 987
 988	ret = ath10k_wmi_vdev_start(ar, &arg);
 989	if (ret) {
 990		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
 991			    vdev_id, ret);
 992		return ret;
 993	}
 994
 995	ret = ath10k_vdev_setup_sync(ar);
 996	if (ret) {
 997		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
 998			    vdev_id, ret);
 999		return ret;
1000	}
1001
1002	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
1003	if (ret) {
1004		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
1005			    vdev_id, ret);
1006		goto vdev_stop;
1007	}
1008
1009	ar->monitor_vdev_id = vdev_id;
1010
1011	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
1012		   ar->monitor_vdev_id);
1013	return 0;
1014
1015vdev_stop:
1016	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1017	if (ret)
1018		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
1019			    ar->monitor_vdev_id, ret);
1020
1021	return ret;
1022}
1023
1024static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1025{
1026	int ret = 0;
1027
1028	lockdep_assert_held(&ar->conf_mutex);
1029
1030	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
1031	if (ret)
1032		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
1033			    ar->monitor_vdev_id, ret);
1034
1035	reinit_completion(&ar->vdev_setup_done);
1036
1037	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1038	if (ret)
 1039		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
1040			    ar->monitor_vdev_id, ret);
1041
1042	ret = ath10k_vdev_setup_sync(ar);
1043	if (ret)
1044		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
1045			    ar->monitor_vdev_id, ret);
1046
1047	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1048		   ar->monitor_vdev_id);
1049	return ret;
1050}
1051
1052static int ath10k_monitor_vdev_create(struct ath10k *ar)
1053{
1054	int bit, ret = 0;
1055
1056	lockdep_assert_held(&ar->conf_mutex);
1057
1058	if (ar->free_vdev_map == 0) {
1059		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1060		return -ENOMEM;
1061	}
1062
1063	bit = __ffs64(ar->free_vdev_map);
1064
1065	ar->monitor_vdev_id = bit;
1066
1067	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1068				     WMI_VDEV_TYPE_MONITOR,
1069				     0, ar->mac_addr);
1070	if (ret) {
1071		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1072			    ar->monitor_vdev_id, ret);
1073		return ret;
1074	}
1075
1076	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1077	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1078		   ar->monitor_vdev_id);
1079
1080	return 0;
1081}
1082
1083static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1084{
1085	int ret = 0;
1086
1087	lockdep_assert_held(&ar->conf_mutex);
1088
1089	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1090	if (ret) {
1091		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1092			    ar->monitor_vdev_id, ret);
1093		return ret;
1094	}
1095
1096	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1097
1098	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1099		   ar->monitor_vdev_id);
1100	return ret;
1101}
1102
1103static int ath10k_monitor_start(struct ath10k *ar)
1104{
1105	int ret;
1106
1107	lockdep_assert_held(&ar->conf_mutex);
1108
1109	ret = ath10k_monitor_vdev_create(ar);
1110	if (ret) {
1111		ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1112		return ret;
1113	}
1114
1115	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1116	if (ret) {
1117		ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1118		ath10k_monitor_vdev_delete(ar);
1119		return ret;
1120	}
1121
1122	ar->monitor_started = true;
1123	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1124
1125	return 0;
1126}
1127
1128static int ath10k_monitor_stop(struct ath10k *ar)
1129{
1130	int ret;
1131
1132	lockdep_assert_held(&ar->conf_mutex);
1133
1134	ret = ath10k_monitor_vdev_stop(ar);
1135	if (ret) {
1136		ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1137		return ret;
1138	}
1139
1140	ret = ath10k_monitor_vdev_delete(ar);
1141	if (ret) {
1142		ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1143		return ret;
1144	}
1145
1146	ar->monitor_started = false;
1147	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1148
1149	return 0;
1150}
1151
1152static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1153{
1154	int num_ctx;
1155
1156	/* At least one chanctx is required to derive a channel to start
1157	 * monitor vdev on.
1158	 */
1159	num_ctx = ath10k_mac_num_chanctxs(ar);
1160	if (num_ctx == 0)
1161		return false;
1162
1163	/* If there's already an existing special monitor interface then don't
1164	 * bother creating another monitor vdev.
1165	 */
1166	if (ar->monitor_arvif)
1167		return false;
1168
1169	return ar->monitor ||
1170	       (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
1171			  ar->running_fw->fw_file.fw_features) &&
1172		(ar->filter_flags & FIF_OTHER_BSS)) ||
1173	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1174}
1175
1176static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1177{
1178	int num_ctx;
1179
1180	num_ctx = ath10k_mac_num_chanctxs(ar);
1181
1182	/* FIXME: Current interface combinations and cfg80211/mac80211 code
 1183	 * shouldn't allow this, but guard against the following case anyway
 1184	 * since multi-channel DFS hasn't been tested at all.
1185	 */
1186	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1187		return false;
1188
1189	return true;
1190}
1191
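/* Reconcile monitor vdev state with the current configuration: start a
 * monitor vdev if one is needed (monitor mode, certain filter flags or a
 * running CAC) and allowed, stop it otherwise.
 */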
1192static int ath10k_monitor_recalc(struct ath10k *ar)
1193{
1194	bool needed;
1195	bool allowed;
1196	int ret;
1197
1198	lockdep_assert_held(&ar->conf_mutex);
1199
1200	needed = ath10k_mac_monitor_vdev_is_needed(ar);
1201	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1202
1203	ath10k_dbg(ar, ATH10K_DBG_MAC,
1204		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
1205		   ar->monitor_started, needed, allowed);
1206
1207	if (WARN_ON(needed && !allowed)) {
1208		if (ar->monitor_started) {
1209			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1210
1211			ret = ath10k_monitor_stop(ar);
1212			if (ret)
1213				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1214					    ret);
1215				/* not serious */
1216		}
1217
1218		return -EPERM;
1219	}
1220
1221	if (needed == ar->monitor_started)
1222		return 0;
1223
1224	if (needed)
1225		return ath10k_monitor_start(ar);
1226	else
1227		return ath10k_monitor_stop(ar);
1228}
1229
1230static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1231{
1232	struct ath10k *ar = arvif->ar;
1233	u32 vdev_param, rts_cts = 0;
1234
1235	lockdep_assert_held(&ar->conf_mutex);
1236
1237	vdev_param = ar->wmi.vdev_param->enable_rtscts;
1238
1239	rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1240
1241	if (arvif->num_legacy_stations > 0)
1242		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1243			      WMI_RTSCTS_PROFILE);
1244	else
1245		rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1246			      WMI_RTSCTS_PROFILE);
1247
1248	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1249					 rts_cts);
1250}
1251
1252static int ath10k_start_cac(struct ath10k *ar)
1253{
1254	int ret;
1255
1256	lockdep_assert_held(&ar->conf_mutex);
1257
1258	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1259
1260	ret = ath10k_monitor_recalc(ar);
1261	if (ret) {
1262		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1263		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1264		return ret;
1265	}
1266
1267	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1268		   ar->monitor_vdev_id);
1269
1270	return 0;
1271}
1272
1273static int ath10k_stop_cac(struct ath10k *ar)
1274{
1275	lockdep_assert_held(&ar->conf_mutex);
1276
1277	/* CAC is not running - do nothing */
1278	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1279		return 0;
1280
1281	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1282	ath10k_monitor_stop(ar);
1283
1284	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1285
1286	return 0;
1287}
1288
1289static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1290				      struct ieee80211_chanctx_conf *conf,
1291				      void *data)
1292{
1293	bool *ret = data;
1294
1295	if (!*ret && conf->radar_enabled)
1296		*ret = true;
1297}
1298
1299static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1300{
1301	bool has_radar = false;
1302
1303	ieee80211_iter_chan_contexts_atomic(ar->hw,
1304					    ath10k_mac_has_radar_iter,
1305					    &has_radar);
1306
1307	return has_radar;
1308}
1309
1310static void ath10k_recalc_radar_detection(struct ath10k *ar)
1311{
1312	int ret;
1313
1314	lockdep_assert_held(&ar->conf_mutex);
1315
1316	ath10k_stop_cac(ar);
1317
1318	if (!ath10k_mac_has_radar_enabled(ar))
1319		return;
1320
1321	if (ar->num_started_vdevs > 0)
1322		return;
1323
1324	ret = ath10k_start_cac(ar);
1325	if (ret) {
1326		/*
1327		 * Not possible to start CAC on current channel so starting
1328		 * radiation is not allowed, make this channel DFS_UNAVAILABLE
1329		 * by indicating that radar was detected.
1330		 */
1331		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1332		ieee80211_radar_detected(ar->hw);
1333	}
1334}
1335
1336static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1337{
1338	struct ath10k *ar = arvif->ar;
1339	int ret;
1340
1341	lockdep_assert_held(&ar->conf_mutex);
1342
1343	reinit_completion(&ar->vdev_setup_done);
1344
1345	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1346	if (ret) {
1347		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1348			    arvif->vdev_id, ret);
1349		return ret;
1350	}
1351
1352	ret = ath10k_vdev_setup_sync(ar);
1353	if (ret) {
 1354		ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1355			    arvif->vdev_id, ret);
1356		return ret;
1357	}
1358
1359	WARN_ON(ar->num_started_vdevs == 0);
1360
1361	if (ar->num_started_vdevs != 0) {
1362		ar->num_started_vdevs--;
1363		ath10k_recalc_radar_detection(ar);
1364	}
1365
1366	return ret;
1367}
1368
1369static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1370				     const struct cfg80211_chan_def *chandef,
1371				     bool restart)
1372{
1373	struct ath10k *ar = arvif->ar;
1374	struct wmi_vdev_start_request_arg arg = {};
1375	int ret = 0;
1376
1377	lockdep_assert_held(&ar->conf_mutex);
1378
1379	reinit_completion(&ar->vdev_setup_done);
1380
1381	arg.vdev_id = arvif->vdev_id;
1382	arg.dtim_period = arvif->dtim_period;
1383	arg.bcn_intval = arvif->beacon_interval;
1384
1385	arg.channel.freq = chandef->chan->center_freq;
1386	arg.channel.band_center_freq1 = chandef->center_freq1;
1387	arg.channel.mode = chan_to_phymode(chandef);
1388
1389	arg.channel.min_power = 0;
1390	arg.channel.max_power = chandef->chan->max_power * 2;
1391	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1392	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1393
1394	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1395		arg.ssid = arvif->u.ap.ssid;
1396		arg.ssid_len = arvif->u.ap.ssid_len;
1397		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1398
1399		/* For now allow DFS for AP mode */
1400		arg.channel.chan_radar =
1401			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1402	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1403		arg.ssid = arvif->vif->bss_conf.ssid;
1404		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1405	}
1406
1407	ath10k_dbg(ar, ATH10K_DBG_MAC,
1408		   "mac vdev %d start center_freq %d phymode %s\n",
1409		   arg.vdev_id, arg.channel.freq,
1410		   ath10k_wmi_phymode_str(arg.channel.mode));
1411
1412	if (restart)
1413		ret = ath10k_wmi_vdev_restart(ar, &arg);
1414	else
1415		ret = ath10k_wmi_vdev_start(ar, &arg);
1416
1417	if (ret) {
1418		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1419			    arg.vdev_id, ret);
1420		return ret;
1421	}
1422
1423	ret = ath10k_vdev_setup_sync(ar);
1424	if (ret) {
1425		ath10k_warn(ar,
1426			    "failed to synchronize setup for vdev %i restart %d: %d\n",
1427			    arg.vdev_id, restart, ret);
1428		return ret;
1429	}
1430
1431	ar->num_started_vdevs++;
1432	ath10k_recalc_radar_detection(ar);
1433
1434	return ret;
1435}
1436
1437static int ath10k_vdev_start(struct ath10k_vif *arvif,
1438			     const struct cfg80211_chan_def *def)
1439{
1440	return ath10k_vdev_start_restart(arvif, def, false);
1441}
1442
1443static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1444			       const struct cfg80211_chan_def *def)
1445{
1446	return ath10k_vdev_start_restart(arvif, def, true);
1447}
1448
1449static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1450				       struct sk_buff *bcn)
1451{
1452	struct ath10k *ar = arvif->ar;
1453	struct ieee80211_mgmt *mgmt;
1454	const u8 *p2p_ie;
1455	int ret;
1456
1457	if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1458		return 0;
1459
1460	mgmt = (void *)bcn->data;
1461	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1462					 mgmt->u.beacon.variable,
1463					 bcn->len - (mgmt->u.beacon.variable -
1464						     bcn->data));
1465	if (!p2p_ie)
1466		return -ENOENT;
1467
1468	ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1469	if (ret) {
1470		ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1471			    arvif->vdev_id, ret);
1472		return ret;
1473	}
1474
1475	return 0;
1476}
1477
1478static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1479				       u8 oui_type, size_t ie_offset)
1480{
1481	size_t len;
1482	const u8 *next;
1483	const u8 *end;
1484	u8 *ie;
1485
1486	if (WARN_ON(skb->len < ie_offset))
1487		return -EINVAL;
1488
1489	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1490					   skb->data + ie_offset,
1491					   skb->len - ie_offset);
1492	if (!ie)
1493		return -ENOENT;
1494
1495	len = ie[1] + 2;
1496	end = skb->data + skb->len;
1497	next = ie + len;
1498
1499	if (WARN_ON(next > end))
1500		return -EINVAL;
1501
1502	memmove(ie, next, end - next);
1503	skb_trim(skb, skb->len - len);
1504
1505	return 0;
1506}
1507
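/* Install an offloaded beacon template: fetch the template from mac80211,
 * hand any P2P IE to firmware separately, strip it from the template and then
 * submit the result via WMI.
 */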
1508static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1509{
1510	struct ath10k *ar = arvif->ar;
1511	struct ieee80211_hw *hw = ar->hw;
1512	struct ieee80211_vif *vif = arvif->vif;
1513	struct ieee80211_mutable_offsets offs = {};
1514	struct sk_buff *bcn;
1515	int ret;
1516
1517	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1518		return 0;
1519
1520	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1521	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1522		return 0;
1523
1524	bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1525	if (!bcn) {
1526		ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1527		return -EPERM;
1528	}
1529
1530	ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1531	if (ret) {
1532		ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1533		kfree_skb(bcn);
1534		return ret;
1535	}
1536
1537	/* P2P IE is inserted by firmware automatically (as configured above)
1538	 * so remove it from the base beacon template to avoid duplicate P2P
1539	 * IEs in beacon frames.
1540	 */
1541	ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1542				    offsetof(struct ieee80211_mgmt,
1543					     u.beacon.variable));
1544
1545	ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1546				  0, NULL, 0);
1547	kfree_skb(bcn);
1548
1549	if (ret) {
1550		ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1551			    ret);
1552		return ret;
1553	}
1554
1555	return 0;
1556}
1557
1558static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1559{
1560	struct ath10k *ar = arvif->ar;
1561	struct ieee80211_hw *hw = ar->hw;
1562	struct ieee80211_vif *vif = arvif->vif;
1563	struct sk_buff *prb;
1564	int ret;
1565
1566	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1567		return 0;
1568
1569	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1570		return 0;
1571
1572	prb = ieee80211_proberesp_get(hw, vif);
1573	if (!prb) {
1574		ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1575		return -EPERM;
1576	}
1577
1578	ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1579	kfree_skb(prb);
1580
1581	if (ret) {
1582		ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1583			    ret);
1584		return ret;
1585	}
1586
1587	return 0;
1588}
1589
1590static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1591{
1592	struct ath10k *ar = arvif->ar;
1593	struct cfg80211_chan_def def;
1594	int ret;
1595
 1596	/* When the vdev is originally started during assign_vif_chanctx() some
1597	 * information is missing, notably SSID. Firmware revisions with beacon
1598	 * offloading require the SSID to be provided during vdev (re)start to
1599	 * handle hidden SSID properly.
1600	 *
1601	 * Vdev restart must be done after vdev has been both started and
1602	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1603	 * deliver vdev restart response event causing timeouts during vdev
1604	 * syncing in ath10k.
1605	 *
1606	 * Note: The vdev down/up and template reinstallation could be skipped
 1607	 * since only wmi-tlv firmware is known to have beacon offload and
1608	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1609	 * response delivery. It's probably more robust to keep it as is.
1610	 */
1611	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1612		return 0;
1613
1614	if (WARN_ON(!arvif->is_started))
1615		return -EINVAL;
1616
1617	if (WARN_ON(!arvif->is_up))
1618		return -EINVAL;
1619
1620	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1621		return -EINVAL;
1622
1623	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1624	if (ret) {
1625		ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1626			    arvif->vdev_id, ret);
1627		return ret;
1628	}
1629
 1630	/* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
1631	 * firmware will crash upon vdev up.
1632	 */
1633
1634	ret = ath10k_mac_setup_bcn_tmpl(arvif);
1635	if (ret) {
1636		ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1637		return ret;
1638	}
1639
1640	ret = ath10k_mac_setup_prb_tmpl(arvif);
1641	if (ret) {
1642		ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1643		return ret;
1644	}
1645
1646	ret = ath10k_vdev_restart(arvif, &def);
1647	if (ret) {
1648		ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1649			    arvif->vdev_id, ret);
1650		return ret;
1651	}
1652
1653	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1654				 arvif->bssid);
1655	if (ret) {
1656		ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1657			    arvif->vdev_id, ret);
1658		return ret;
1659	}
1660
1661	return 0;
1662}
1663
1664static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1665				     struct ieee80211_bss_conf *info)
1666{
1667	struct ath10k *ar = arvif->ar;
1668	int ret = 0;
1669
1670	lockdep_assert_held(&arvif->ar->conf_mutex);
1671
1672	if (!info->enable_beacon) {
1673		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1674		if (ret)
1675			ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1676				    arvif->vdev_id, ret);
1677
1678		arvif->is_up = false;
1679
1680		spin_lock_bh(&arvif->ar->data_lock);
1681		ath10k_mac_vif_beacon_free(arvif);
1682		spin_unlock_bh(&arvif->ar->data_lock);
1683
1684		return;
1685	}
1686
1687	arvif->tx_seq_no = 0x1000;
1688
1689	arvif->aid = 0;
1690	ether_addr_copy(arvif->bssid, info->bssid);
1691
1692	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1693				 arvif->bssid);
1694	if (ret) {
1695		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1696			    arvif->vdev_id, ret);
1697		return;
1698	}
1699
1700	arvif->is_up = true;
1701
1702	ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1703	if (ret) {
1704		ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1705			    arvif->vdev_id, ret);
1706		return;
1707	}
1708
1709	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1710}
1711
1712static void ath10k_control_ibss(struct ath10k_vif *arvif,
1713				struct ieee80211_bss_conf *info,
1714				const u8 self_peer[ETH_ALEN])
1715{
1716	struct ath10k *ar = arvif->ar;
1717	u32 vdev_param;
1718	int ret = 0;
1719
1720	lockdep_assert_held(&arvif->ar->conf_mutex);
1721
1722	if (!info->ibss_joined) {
1723		if (is_zero_ether_addr(arvif->bssid))
1724			return;
1725
1726		eth_zero_addr(arvif->bssid);
1727
1728		return;
1729	}
1730
1731	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1732	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1733					ATH10K_DEFAULT_ATIM);
1734	if (ret)
1735		ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1736			    arvif->vdev_id, ret);
1737}
1738
1739static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1740{
1741	struct ath10k *ar = arvif->ar;
1742	u32 param;
1743	u32 value;
1744	int ret;
1745
1746	lockdep_assert_held(&arvif->ar->conf_mutex);
1747
1748	if (arvif->u.sta.uapsd)
1749		value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1750	else
1751		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1752
1753	param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1754	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1755	if (ret) {
1756		ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1757			    value, arvif->vdev_id, ret);
1758		return ret;
1759	}
1760
1761	return 0;
1762}
1763
1764static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1765{
1766	struct ath10k *ar = arvif->ar;
1767	u32 param;
1768	u32 value;
1769	int ret;
1770
1771	lockdep_assert_held(&arvif->ar->conf_mutex);
1772
1773	if (arvif->u.sta.uapsd)
1774		value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1775	else
1776		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1777
1778	param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1779	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1780					  param, value);
1781	if (ret) {
1782		ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1783			    value, arvif->vdev_id, ret);
1784		return ret;
1785	}
1786
1787	return 0;
1788}
1789
1790static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1791{
1792	struct ath10k_vif *arvif;
1793	int num = 0;
1794
1795	lockdep_assert_held(&ar->conf_mutex);
1796
1797	list_for_each_entry(arvif, &ar->arvifs, list)
1798		if (arvif->is_started)
1799			num++;
1800
1801	return num;
1802}
1803
1804static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1805{
1806	struct ath10k *ar = arvif->ar;
1807	struct ieee80211_vif *vif = arvif->vif;
1808	struct ieee80211_conf *conf = &ar->hw->conf;
1809	enum wmi_sta_powersave_param param;
1810	enum wmi_sta_ps_mode psmode;
1811	int ret;
1812	int ps_timeout;
1813	bool enable_ps;
1814
1815	lockdep_assert_held(&arvif->ar->conf_mutex);
1816
1817	if (arvif->vif->type != NL80211_IFTYPE_STATION)
1818		return 0;
1819
1820	enable_ps = arvif->ps;
1821
1822	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1823	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1824		      ar->running_fw->fw_file.fw_features)) {
1825		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1826			    arvif->vdev_id);
1827		enable_ps = false;
1828	}
1829
1830	if (!arvif->is_started) {
1831		/* mac80211 can update vif powersave state while disconnected.
1832		 * Firmware doesn't behave nicely and consumes more power than
1833		 * necessary if PS is disabled on a non-started vdev. Hence
1834		 * force-enable PS for non-running vdevs.
1835		 */
1836		psmode = WMI_STA_PS_MODE_ENABLED;
1837	} else if (enable_ps) {
1838		psmode = WMI_STA_PS_MODE_ENABLED;
1839		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1840
1841		ps_timeout = conf->dynamic_ps_timeout;
1842		if (ps_timeout == 0) {
1843			/* Firmware doesn't like 0 */
1844			ps_timeout = ieee80211_tu_to_usec(
1845				vif->bss_conf.beacon_int) / 1000;
1846		}
1847
1848		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1849						  ps_timeout);
1850		if (ret) {
1851			ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1852				    arvif->vdev_id, ret);
1853			return ret;
1854		}
1855	} else {
1856		psmode = WMI_STA_PS_MODE_DISABLED;
1857	}
1858
1859	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1860		   arvif->vdev_id, psmode ? "enable" : "disable");
1861
1862	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1863	if (ret) {
1864		ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1865			    psmode, arvif->vdev_id, ret);
1866		return ret;
1867	}
1868
1869	return 0;
1870}
1871
1872static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1873{
1874	struct ath10k *ar = arvif->ar;
1875	struct wmi_sta_keepalive_arg arg = {};
1876	int ret;
1877
1878	lockdep_assert_held(&arvif->ar->conf_mutex);
1879
1880	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1881		return 0;
1882
1883	if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1884		return 0;
1885
1886	/* Some firmware revisions have a bug and ignore the `enabled` field.
1887	 * Instead use the interval to disable the keepalive.
1888	 */
1889	arg.vdev_id = arvif->vdev_id;
1890	arg.enabled = 1;
1891	arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1892	arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1893
1894	ret = ath10k_wmi_sta_keepalive(ar, &arg);
1895	if (ret) {
1896		ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1897			    arvif->vdev_id, ret);
1898		return ret;
1899	}
1900
1901	return 0;
1902}
1903
1904static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1905{
1906	struct ath10k *ar = arvif->ar;
1907	struct ieee80211_vif *vif = arvif->vif;
1908	int ret;
1909
1910	lockdep_assert_held(&arvif->ar->conf_mutex);
1911
1912	if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1913		return;
1914
1915	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1916		return;
1917
1918	if (!vif->csa_active)
1919		return;
1920
1921	if (!arvif->is_up)
1922		return;
1923
1924	if (!ieee80211_csa_is_complete(vif)) {
1925		ieee80211_csa_update_counter(vif);
1926
1927		ret = ath10k_mac_setup_bcn_tmpl(arvif);
1928		if (ret)
1929			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1930				    ret);
1931
1932		ret = ath10k_mac_setup_prb_tmpl(arvif);
1933		if (ret)
1934			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1935				    ret);
1936	} else {
1937		ieee80211_csa_finish(vif);
1938	}
1939}
1940
1941static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1942{
1943	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1944						ap_csa_work);
1945	struct ath10k *ar = arvif->ar;
1946
1947	mutex_lock(&ar->conf_mutex);
1948	ath10k_mac_vif_ap_csa_count_down(arvif);
1949	mutex_unlock(&ar->conf_mutex);
1950}
1951
1952static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
1953					  struct ieee80211_vif *vif)
1954{
1955	struct sk_buff *skb = data;
1956	struct ieee80211_mgmt *mgmt = (void *)skb->data;
1957	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1958
1959	if (vif->type != NL80211_IFTYPE_STATION)
1960		return;
1961
1962	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
1963		return;
1964
1965	cancel_delayed_work(&arvif->connection_loss_work);
1966}
1967
1968void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
1969{
1970	ieee80211_iterate_active_interfaces_atomic(ar->hw,
1971						   IEEE80211_IFACE_ITER_NORMAL,
1972						   ath10k_mac_handle_beacon_iter,
1973						   skb);
1974}
1975
1976static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
1977					       struct ieee80211_vif *vif)
1978{
1979	u32 *vdev_id = data;
1980	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1981	struct ath10k *ar = arvif->ar;
1982	struct ieee80211_hw *hw = ar->hw;
1983
1984	if (arvif->vdev_id != *vdev_id)
1985		return;
1986
1987	if (!arvif->is_up)
1988		return;
1989
1990	ieee80211_beacon_loss(vif);
1991
1992	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
1993	 * (done by mac80211) succeeds but beacons do not resume then it
1994	 * doesn't make sense to continue operation. Queue connection loss work
1995	 * which can be cancelled when beacon is received.
1996	 */
1997	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
1998				     ATH10K_CONNECTION_LOSS_HZ);
1999}
2000
2001void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
2002{
2003	ieee80211_iterate_active_interfaces_atomic(ar->hw,
2004						   IEEE80211_IFACE_ITER_NORMAL,
2005						   ath10k_mac_handle_beacon_miss_iter,
2006						   &vdev_id);
2007}
2008
2009static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
2010{
2011	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2012						connection_loss_work.work);
2013	struct ieee80211_vif *vif = arvif->vif;
2014
2015	if (!arvif->is_up)
2016		return;
2017
2018	ieee80211_connection_loss(vif);
2019}
2020
2021/**********************/
2022/* Station management */
2023/**********************/
2024
2025static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2026					     struct ieee80211_vif *vif)
2027{
2028	/* Some firmware revisions have unstable STA powersave when the listen
2029	 * interval is set too high (e.g. 5). The symptom is that the firmware
2030	 * doesn't generate NullFunc frames properly even if buffered frames
2031	 * have been indicated in the Beacon TIM. The firmware seldom wakes up
2032	 * to pull buffered frames; pinging the device from the AP often fails.
2033	 *
2034	 * As a workaround set the listen interval to 1.
2035	 */
2036	if (vif->type == NL80211_IFTYPE_STATION)
2037		return 1;
2038
2039	return ar->hw->conf.listen_interval;
2040}
2041
2042static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2043				      struct ieee80211_vif *vif,
2044				      struct ieee80211_sta *sta,
2045				      struct wmi_peer_assoc_complete_arg *arg)
2046{
2047	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2048	u32 aid;
2049
2050	lockdep_assert_held(&ar->conf_mutex);
2051
2052	if (vif->type == NL80211_IFTYPE_STATION)
2053		aid = vif->bss_conf.aid;
2054	else
2055		aid = sta->aid;
2056
2057	ether_addr_copy(arg->addr, sta->addr);
2058	arg->vdev_id = arvif->vdev_id;
2059	arg->peer_aid = aid;
2060	arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2061	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2062	arg->peer_num_spatial_streams = 1;
2063	arg->peer_caps = vif->bss_conf.assoc_capability;
2064}
2065
2066static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2067				       struct ieee80211_vif *vif,
2068				       struct ieee80211_sta *sta,
2069				       struct wmi_peer_assoc_complete_arg *arg)
2070{
2071	struct ieee80211_bss_conf *info = &vif->bss_conf;
2072	struct cfg80211_chan_def def;
2073	struct cfg80211_bss *bss;
2074	const u8 *rsnie = NULL;
2075	const u8 *wpaie = NULL;
2076
2077	lockdep_assert_held(&ar->conf_mutex);
2078
2079	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2080		return;
2081
2082	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2083			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2084	if (bss) {
2085		const struct cfg80211_bss_ies *ies;
2086
2087		rcu_read_lock();
2088		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2089
2090		ies = rcu_dereference(bss->ies);
2091
2092		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2093						WLAN_OUI_TYPE_MICROSOFT_WPA,
2094						ies->data,
2095						ies->len);
2096		rcu_read_unlock();
2097		cfg80211_put_bss(ar->hw->wiphy, bss);
2098	}
2099
2100	/* FIXME: is keying this off the RSN IE/WPA IE the correct approach? */
2101	if (rsnie || wpaie) {
2102		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2103		arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2104	}
2105
2106	if (wpaie) {
2107		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2108		arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2109	}
2110
2111	if (sta->mfp &&
2112	    test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2113		     ar->running_fw->fw_file.fw_features)) {
2114		arg->peer_flags |= ar->wmi.peer_flags->pmf;
2115	}
2116}
2117
2118static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2119				      struct ieee80211_vif *vif,
2120				      struct ieee80211_sta *sta,
2121				      struct wmi_peer_assoc_complete_arg *arg)
2122{
2123	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2124	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2125	struct cfg80211_chan_def def;
2126	const struct ieee80211_supported_band *sband;
2127	const struct ieee80211_rate *rates;
2128	enum nl80211_band band;
2129	u32 ratemask;
2130	u8 rate;
2131	int i;
2132
2133	lockdep_assert_held(&ar->conf_mutex);
2134
2135	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2136		return;
2137
2138	band = def.chan->band;
2139	sband = ar->hw->wiphy->bands[band];
2140	ratemask = sta->supp_rates[band];
2141	ratemask &= arvif->bitrate_mask.control[band].legacy;
2142	rates = sband->bitrates;
2143
2144	rateset->num_rates = 0;
2145
2146	for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2147		if (!(ratemask & 1))
2148			continue;
2149
2150		rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2151		rateset->rates[rateset->num_rates] = rate;
2152		rateset->num_rates++;
2153	}
2154}
2155
2156static bool
2157ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2158{
2159	int nss;
2160
2161	for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2162		if (ht_mcs_mask[nss])
2163			return false;
2164
2165	return true;
2166}
2167
2168static bool
2169ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2170{
2171	int nss;
2172
2173	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2174		if (vht_mcs_mask[nss])
2175			return false;
2176
2177	return true;
2178}
2179
2180static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2181				   struct ieee80211_vif *vif,
2182				   struct ieee80211_sta *sta,
2183				   struct wmi_peer_assoc_complete_arg *arg)
2184{
2185	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2186	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2187	struct cfg80211_chan_def def;
2188	enum nl80211_band band;
2189	const u8 *ht_mcs_mask;
2190	const u16 *vht_mcs_mask;
2191	int i, n;
2192	u8 max_nss;
2193	u32 stbc;
2194
2195	lockdep_assert_held(&ar->conf_mutex);
2196
2197	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2198		return;
2199
2200	if (!ht_cap->ht_supported)
2201		return;
2202
2203	band = def.chan->band;
2204	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2205	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2206
2207	if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2208	    ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2209		return;
2210
2211	arg->peer_flags |= ar->wmi.peer_flags->ht;
2212	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2213				    ht_cap->ampdu_factor)) - 1;
2214
2215	arg->peer_mpdu_density =
2216		ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2217
2218	arg->peer_ht_caps = ht_cap->cap;
2219	arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2220
2221	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2222		arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2223
2224	if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2225		arg->peer_flags |= ar->wmi.peer_flags->bw40;
2226		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2227	}
2228
2229	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2230		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2231			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2232
2233		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2234			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2235	}
2236
2237	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2238		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2239		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2240	}
2241
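	/* The RX STBC subfield in the HT capabilities is a 2-bit count of
	 * spatial streams the peer can receive with STBC; relocate it into
	 * the corresponding field of the WMI rate caps.
	 */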
2242	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2243		stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2244		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2245		stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2246		arg->peer_rate_caps |= stbc;
2247		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2248	}
2249
2250	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2251		arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2252	else if (ht_cap->mcs.rx_mask[1])
2253		arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2254
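	/* Build the peer HT rate set from the intersection of the MCS indices
	 * advertised in the STA's RX MCS bitmap and the user-configured
	 * ht_mcs mask, while tracking the highest usable NSS.
	 */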
2255	for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2256		if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2257		    (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2258			max_nss = (i / 8) + 1;
2259			arg->peer_ht_rates.rates[n++] = i;
2260		}
2261
2262	/*
2263	 * This is a workaround for HT-enabled STAs which break the spec
2264	 * and have no HT capabilities RX mask (no HT RX MCS map).
2265	 *
2266	 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2267	 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2268	 *
2269	 * Firmware asserts if such a situation occurs.
2270	 */
2271	if (n == 0) {
2272		arg->peer_ht_rates.num_rates = 8;
2273		for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2274			arg->peer_ht_rates.rates[i] = i;
2275	} else {
2276		arg->peer_ht_rates.num_rates = n;
2277		arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2278	}
2279
2280	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2281		   arg->addr,
2282		   arg->peer_ht_rates.num_rates,
2283		   arg->peer_num_spatial_streams);
2284}
2285
2286static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2287				    struct ath10k_vif *arvif,
2288				    struct ieee80211_sta *sta)
2289{
2290	u32 uapsd = 0;
2291	u32 max_sp = 0;
2292	int ret = 0;
2293
2294	lockdep_assert_held(&ar->conf_mutex);
2295
2296	if (sta->wme && sta->uapsd_queues) {
2297		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2298			   sta->uapsd_queues, sta->max_sp);
2299
2300		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2301			uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2302				 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2303		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2304			uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2305				 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2306		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2307			uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2308				 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2309		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2310			uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2311				 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2312
2313		if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2314			max_sp = sta->max_sp;
2315
2316		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2317						 sta->addr,
2318						 WMI_AP_PS_PEER_PARAM_UAPSD,
2319						 uapsd);
2320		if (ret) {
2321			ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2322				    arvif->vdev_id, ret);
2323			return ret;
2324		}
2325
2326		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2327						 sta->addr,
2328						 WMI_AP_PS_PEER_PARAM_MAX_SP,
2329						 max_sp);
2330		if (ret) {
2331			ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2332				    arvif->vdev_id, ret);
2333			return ret;
2334		}
2335
2336	/* TODO: set this based on the STA listen interval and beacon
2337	 * interval. sta->listen_interval is not known yet (a mac80211 patch
2338	 * is required), so use 10 seconds for now.
2339	 */
2340		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2341						 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2342						 10);
2343		if (ret) {
2344			ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2345				    arvif->vdev_id, ret);
2346			return ret;
2347		}
2348	}
2349
2350	return 0;
2351}
2352
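/* Clamp the peer's VHT TX MCS map against the user-configured per-NSS MCS
 * mask. The map packs a 2-bit MCS support value per NSS; each field is
 * rewritten to the highest allowed MCS range (0-7/0-8/0-9) or marked as
 * not supported when the mask leaves nothing usable.
 */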
2353static u16
2354ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2355			      const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2356{
2357	int idx_limit;
2358	int nss;
2359	u16 mcs_map;
2360	u16 mcs;
2361
2362	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2363		mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2364			  vht_mcs_limit[nss];
2365
2366		if (mcs_map)
2367			idx_limit = fls(mcs_map) - 1;
2368		else
2369			idx_limit = -1;
2370
2371		switch (idx_limit) {
2372		case 0: /* fall through */
2373		case 1: /* fall through */
2374		case 2: /* fall through */
2375		case 3: /* fall through */
2376		case 4: /* fall through */
2377		case 5: /* fall through */
2378		case 6: /* fall through */
2379		default:
2380			/* see ath10k_mac_can_set_bitrate_mask() */
2381			WARN_ON(1);
2382			/* fall through */
2383		case -1:
2384			mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2385			break;
2386		case 7:
2387			mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2388			break;
2389		case 8:
2390			mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2391			break;
2392		case 9:
2393			mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2394			break;
2395		}
2396
2397		tx_mcs_set &= ~(0x3 << (nss * 2));
2398		tx_mcs_set |= mcs << (nss * 2);
2399	}
2400
2401	return tx_mcs_set;
2402}
2403
2404static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2405				    struct ieee80211_vif *vif,
2406				    struct ieee80211_sta *sta,
2407				    struct wmi_peer_assoc_complete_arg *arg)
2408{
2409	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2410	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2411	struct cfg80211_chan_def def;
2412	enum nl80211_band band;
2413	const u16 *vht_mcs_mask;
2414	u8 ampdu_factor;
2415
2416	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2417		return;
2418
2419	if (!vht_cap->vht_supported)
2420		return;
2421
2422	band = def.chan->band;
2423	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2424
2425	if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2426		return;
2427
2428	arg->peer_flags |= ar->wmi.peer_flags->vht;
2429
2430	if (def.chan->band == NL80211_BAND_2GHZ)
2431		arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2432
2433	arg->peer_vht_caps = vht_cap->cap;
2434
2435	ampdu_factor = (vht_cap->cap &
2436			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2437		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2438
2439	/* Workaround: Some Netgear/Linksys 11ac APs set the Rx A-MPDU factor to
2440	 * zero in the VHT IE. Using it would result in degraded throughput.
2441	 * arg->peer_max_mpdu at this point contains the HT max_mpdu, so keep
2442	 * it if the VHT max_mpdu is smaller. */
2443	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2444				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2445					ampdu_factor)) - 1);
2446
2447	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2448		arg->peer_flags |= ar->wmi.peer_flags->bw80;
2449
2450	arg->peer_vht_rates.rx_max_rate =
2451		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2452	arg->peer_vht_rates.rx_mcs_set =
2453		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2454	arg->peer_vht_rates.tx_max_rate =
2455		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2456	arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2457		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2458
2459	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2460		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2461}
2462
2463static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2464				    struct ieee80211_vif *vif,
2465				    struct ieee80211_sta *sta,
2466				    struct wmi_peer_assoc_complete_arg *arg)
2467{
2468	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2469
2470	switch (arvif->vdev_type) {
2471	case WMI_VDEV_TYPE_AP:
2472		if (sta->wme)
2473			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2474
2475		if (sta->wme && sta->uapsd_queues) {
2476			arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2477			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2478		}
2479		break;
2480	case WMI_VDEV_TYPE_STA:
2481		if (vif->bss_conf.qos)
2482			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2483		break;
2484	case WMI_VDEV_TYPE_IBSS:
2485		if (sta->wme)
2486			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2487		break;
2488	default:
2489		break;
2490	}
2491
2492	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2493		   sta->addr, !!(arg->peer_flags &
2494		   arvif->ar->wmi.peer_flags->qos));
2495}
2496
2497static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2498{
2499	return sta->supp_rates[NL80211_BAND_2GHZ] >>
2500	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2501}
2502
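/* Derive the WMI phymode for a peer from its band, its advertised HT/VHT
 * support (subject to the configured bitrate masks) and its negotiated
 * bandwidth.
 */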
2503static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2504					struct ieee80211_vif *vif,
2505					struct ieee80211_sta *sta,
2506					struct wmi_peer_assoc_complete_arg *arg)
2507{
2508	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2509	struct cfg80211_chan_def def;
2510	enum nl80211_band band;
2511	const u8 *ht_mcs_mask;
2512	const u16 *vht_mcs_mask;
2513	enum wmi_phy_mode phymode = MODE_UNKNOWN;
2514
2515	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2516		return;
2517
2518	band = def.chan->band;
2519	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2520	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2521
2522	switch (band) {
2523	case NL80211_BAND_2GHZ:
2524		if (sta->vht_cap.vht_supported &&
2525		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2526			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2527				phymode = MODE_11AC_VHT40;
2528			else
2529				phymode = MODE_11AC_VHT20;
2530		} else if (sta->ht_cap.ht_supported &&
2531			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2532			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2533				phymode = MODE_11NG_HT40;
2534			else
2535				phymode = MODE_11NG_HT20;
2536		} else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2537			phymode = MODE_11G;
2538		} else {
2539			phymode = MODE_11B;
2540		}
2541
2542		break;
2543	case NL80211_BAND_5GHZ:
2544		/*
2545		 * Check VHT first.
2546		 */
2547		if (sta->vht_cap.vht_supported &&
2548		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2549			if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2550				phymode = MODE_11AC_VHT80;
2551			else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2552				phymode = MODE_11AC_VHT40;
2553			else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2554				phymode = MODE_11AC_VHT20;
2555		} else if (sta->ht_cap.ht_supported &&
2556			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2557			if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2558				phymode = MODE_11NA_HT40;
2559			else
2560				phymode = MODE_11NA_HT20;
2561		} else {
2562			phymode = MODE_11A;
2563		}
2564
2565		break;
2566	default:
2567		break;
2568	}
2569
2570	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2571		   sta->addr, ath10k_wmi_phymode_str(phymode));
2572
2573	arg->peer_phymode = phymode;
2574	WARN_ON(phymode == MODE_UNKNOWN);
2575}
2576
2577static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2578				     struct ieee80211_vif *vif,
2579				     struct ieee80211_sta *sta,
2580				     struct wmi_peer_assoc_complete_arg *arg)
2581{
2582	lockdep_assert_held(&ar->conf_mutex);
2583
2584	memset(arg, 0, sizeof(*arg));
2585
2586	ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2587	ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2588	ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2589	ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2590	ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2591	ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2592	ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2593
2594	return 0;
2595}
2596
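/* Translate the HT SM power save field advertised by the peer into the WMI
 * SMPS state pushed to firmware via WMI_PEER_SMPS_STATE.
 */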
2597static const u32 ath10k_smps_map[] = {
2598	[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2599	[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2600	[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2601	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2602};
2603
2604static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2605				  const u8 *addr,
2606				  const struct ieee80211_sta_ht_cap *ht_cap)
2607{
2608	int smps;
2609
2610	if (!ht_cap->ht_supported)
2611		return 0;
2612
2613	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2614	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2615
2616	if (smps >= ARRAY_SIZE(ath10k_smps_map))
2617		return -EINVAL;
2618
2619	return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2620					 WMI_PEER_SMPS_STATE,
2621					 ath10k_smps_map[smps]);
2622}
2623
2624static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2625				      struct ieee80211_vif *vif,
2626				      struct ieee80211_sta_vht_cap vht_cap)
2627{
2628	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2629	int ret;
2630	u32 param;
2631	u32 value;
2632
2633	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2634		return 0;
2635
2636	if (!(ar->vht_cap_info &
2637	      (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2638	       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2639	       IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2640	       IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2641		return 0;
2642
2643	param = ar->wmi.vdev_param->txbf;
2644	value = 0;
2645
2646	if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2647		return 0;
2648
2649	/* The following logic is correct: if a remote STA advertises support
2650	 * for being a beamformer then we should enable ourselves as a beamformee.
2651	 */
2652
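	/* Enable each local TXBF role only if the peer advertises the
	 * complementary capability; MU support also implies the matching SU
	 * support bit below.
	 */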
2653	if (ar->vht_cap_info &
2654	    (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2655	     IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2656		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2657			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2658
2659		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2660			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2661	}
2662
2663	if (ar->vht_cap_info &
2664	    (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2665	     IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2666		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2667			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2668
2669		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2670			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2671	}
2672
2673	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2674		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2675
2676	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2677		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2678
2679	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2680	if (ret) {
2681		ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2682			    value, ret);
2683		return ret;
2684	}
2685
2686	return 0;
2687}
2688
2689/* can be called only in mac80211 callbacks due to `key_count` usage */
2690static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2691			     struct ieee80211_vif *vif,
2692			     struct ieee80211_bss_conf *bss_conf)
2693{
2694	struct ath10k *ar = hw->priv;
2695	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2696	struct ieee80211_sta_ht_cap ht_cap;
2697	struct ieee80211_sta_vht_cap vht_cap;
2698	struct wmi_peer_assoc_complete_arg peer_arg;
2699	struct ieee80211_sta *ap_sta;
2700	int ret;
2701
2702	lockdep_assert_held(&ar->conf_mutex);
2703
2704	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2705		   arvif->vdev_id, arvif->bssid, arvif->aid);
2706
2707	rcu_read_lock();
2708
2709	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2710	if (!ap_sta) {
2711		ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2712			    bss_conf->bssid, arvif->vdev_id);
2713		rcu_read_unlock();
2714		return;
2715	}
2716
2717	/* ap_sta must be accessed only within the RCU section, which must be
2718	 * left before calling ath10k_setup_peer_smps() as it might sleep. */
2719	ht_cap = ap_sta->ht_cap;
2720	vht_cap = ap_sta->vht_cap;
2721
2722	ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2723	if (ret) {
2724		ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2725			    bss_conf->bssid, arvif->vdev_id, ret);
2726		rcu_read_unlock();
2727		return;
2728	}
2729
2730	rcu_read_unlock();
2731
2732	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2733	if (ret) {
2734		ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2735			    bss_conf->bssid, arvif->vdev_id, ret);
2736		return;
2737	}
2738
2739	ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2740	if (ret) {
2741		ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2742			    arvif->vdev_id, ret);
2743		return;
2744	}
2745
2746	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2747	if (ret) {
2748		ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2749			    arvif->vdev_id, bss_conf->bssid, ret);
2750		return;
2751	}
2752
2753	ath10k_dbg(ar, ATH10K_DBG_MAC,
2754		   "mac vdev %d up (associated) bssid %pM aid %d\n",
2755		   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2756
2757	WARN_ON(arvif->is_up);
2758
2759	arvif->aid = bss_conf->aid;
2760	ether_addr_copy(arvif->bssid, bss_conf->bssid);
2761
2762	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2763	if (ret) {
2764		ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2765			    arvif->vdev_id, ret);
2766		return;
2767	}
2768
2769	arvif->is_up = true;
2770
2771	/* Workaround: Some firmware revisions (tested with qca6174
2772	 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
2773	 * poked with peer param command.
2774	 */
2775	ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2776					WMI_PEER_DUMMY_VAR, 1);
2777	if (ret) {
2778		ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2779			    arvif->bssid, arvif->vdev_id, ret);
2780		return;
2781	}
2782}
2783
2784static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2785				struct ieee80211_vif *vif)
2786{
2787	struct ath10k *ar = hw->priv;
2788	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2789	struct ieee80211_sta_vht_cap vht_cap = {};
2790	int ret;
2791
2792	lockdep_assert_held(&ar->conf_mutex);
2793
2794	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2795		   arvif->vdev_id, arvif->bssid);
2796
2797	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2798	if (ret)
2799		ath10k_warn(ar, "failed to down vdev %i: %d\n",
2800			    arvif->vdev_id, ret);
2801
2802	arvif->def_wep_key_idx = -1;
2803
2804	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2805	if (ret) {
2806		ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2807			    arvif->vdev_id, ret);
2808		return;
2809	}
2810
2811	arvif->is_up = false;
2812
2813	cancel_delayed_work_sync(&arvif->connection_loss_work);
2814}
2815
2816static int ath10k_station_assoc(struct ath10k *ar,
2817				struct ieee80211_vif *vif,
2818				struct ieee80211_sta *sta,
2819				bool reassoc)
2820{
2821	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2822	struct wmi_peer_assoc_complete_arg peer_arg;
2823	int ret = 0;
2824
2825	lockdep_assert_held(&ar->conf_mutex);
2826
2827	ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2828	if (ret) {
2829		ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2830			    sta->addr, arvif->vdev_id, ret);
2831		return ret;
2832	}
2833
2834	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2835	if (ret) {
2836		ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2837			    sta->addr, arvif->vdev_id, ret);
2838		return ret;
2839	}
2840
2841	/* Re-assoc is run only to update supported rates for a given station.
2842	 * It doesn't make much sense to reconfigure the peer completely.
2843	 */
2844	if (!reassoc) {
2845		ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2846					     &sta->ht_cap);
2847		if (ret) {
2848			ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2849				    arvif->vdev_id, ret);
2850			return ret;
2851		}
2852
2853		ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2854		if (ret) {
2855			ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2856				    sta->addr, arvif->vdev_id, ret);
2857			return ret;
2858		}
2859
2860		if (!sta->wme) {
2861			arvif->num_legacy_stations++;
2862			ret  = ath10k_recalc_rtscts_prot(arvif);
2863			if (ret) {
2864				ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2865					    arvif->vdev_id, ret);
2866				return ret;
2867			}
2868		}
2869
2870		/* Plumb cached keys only for static WEP */
2871		if (arvif->def_wep_key_idx != -1) {
2872			ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2873			if (ret) {
2874				ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2875					    arvif->vdev_id, ret);
2876				return ret;
2877			}
2878		}
2879	}
2880
2881	return ret;
2882}
2883
2884static int ath10k_station_disassoc(struct ath10k *ar,
2885				   struct ieee80211_vif *vif,
2886				   struct ieee80211_sta *sta)
2887{
2888	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2889	int ret = 0;
2890
2891	lockdep_assert_held(&ar->conf_mutex);
2892
2893	if (!sta->wme) {
2894		arvif->num_legacy_stations--;
2895		ret = ath10k_recalc_rtscts_prot(arvif);
2896		if (ret) {
2897			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2898				    arvif->vdev_id, ret);
2899			return ret;
2900		}
2901	}
2902
2903	ret = ath10k_clear_peer_keys(arvif, sta->addr);
2904	if (ret) {
2905		ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
2906			    arvif->vdev_id, ret);
2907		return ret;
2908	}
2909
2910	return ret;
2911}
2912
2913/**************/
2914/* Regulatory */
2915/**************/
2916
2917static int ath10k_update_channel_list(struct ath10k *ar)
2918{
2919	struct ieee80211_hw *hw = ar->hw;
2920	struct ieee80211_supported_band **bands;
2921	enum nl80211_band band;
2922	struct ieee80211_channel *channel;
2923	struct wmi_scan_chan_list_arg arg = {0};
2924	struct wmi_channel_arg *ch;
2925	bool passive;
2926	int len;
2927	int ret;
2928	int i;
2929
2930	lockdep_assert_held(&ar->conf_mutex);
2931
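	/* Two passes: first count the enabled channels across all bands to
	 * size the WMI channel list, then fill in one wmi_channel_arg per
	 * channel.
	 */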
2932	bands = hw->wiphy->bands;
2933	for (band = 0; band < NUM_NL80211_BANDS; band++) {
2934		if (!bands[band])
2935			continue;
2936
2937		for (i = 0; i < bands[band]->n_channels; i++) {
2938			if (bands[band]->channels[i].flags &
2939			    IEEE80211_CHAN_DISABLED)
2940				continue;
2941
2942			arg.n_channels++;
2943		}
2944	}
2945
2946	len = sizeof(struct wmi_channel_arg) * arg.n_channels;
2947	arg.channels = kzalloc(len, GFP_KERNEL);
2948	if (!arg.channels)
2949		return -ENOMEM;
2950
2951	ch = arg.channels;
2952	for (band = 0; band < NUM_NL80211_BANDS; band++) {
2953		if (!bands[band])
2954			continue;
2955
2956		for (i = 0; i < bands[band]->n_channels; i++) {
2957			channel = &bands[band]->channels[i];
2958
2959			if (channel->flags & IEEE80211_CHAN_DISABLED)
2960				continue;
2961
2962			ch->allow_ht = true;
2963
2964			/* FIXME: when should we really allow VHT? */
2965			ch->allow_vht = true;
2966
2967			ch->allow_ibss =
2968				!(channel->flags & IEEE80211_CHAN_NO_IR);
2969
2970			ch->ht40plus =
2971				!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
2972
2973			ch->chan_radar =
2974				!!(channel->flags & IEEE80211_CHAN_RADAR);
2975
2976			passive = channel->flags & IEEE80211_CHAN_NO_IR;
2977			ch->passive = passive;
2978
2979			ch->freq = channel->center_freq;
2980			ch->band_center_freq1 = channel->center_freq;
2981			ch->min_power = 0;
2982			ch->max_power = channel->max_power * 2;
2983			ch->max_reg_power = channel->max_reg_power * 2;
2984			ch->max_antenna_gain = channel->max_antenna_gain * 2;
2985			ch->reg_class_id = 0; /* FIXME */
2986
2987			/* FIXME: why use only legacy modes, why not any
2988			 * HT/VHT modes? Would that even make any
2989			 * difference? */
2990			if (channel->band == NL80211_BAND_2GHZ)
2991				ch->mode = MODE_11G;
2992			else
2993				ch->mode = MODE_11A;
2994
2995			if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
2996				continue;
2997
2998			ath10k_dbg(ar, ATH10K_DBG_WMI,
2999				   "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
3000				    ch - arg.channels, arg.n_channels,
3001				   ch->freq, ch->max_power, ch->max_reg_power,
3002				   ch->max_antenna_gain, ch->mode);
3003
3004			ch++;
3005		}
3006	}
3007
3008	ret = ath10k_wmi_scan_chan_list(ar, &arg);
3009	kfree(arg.channels);
3010
3011	return ret;
3012}
3013
3014static enum wmi_dfs_region
3015ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
3016{
3017	switch (dfs_region) {
3018	case NL80211_DFS_UNSET:
3019		return WMI_UNINIT_DFS_DOMAIN;
3020	case NL80211_DFS_FCC:
3021		return WMI_FCC_DFS_DOMAIN;
3022	case NL80211_DFS_ETSI:
3023		return WMI_ETSI_DFS_DOMAIN;
3024	case NL80211_DFS_JP:
3025		return WMI_MKK4_DFS_DOMAIN;
3026	}
3027	return WMI_UNINIT_DFS_DOMAIN;
3028}
3029
3030static void ath10k_regd_update(struct ath10k *ar)
3031{
3032	struct reg_dmn_pair_mapping *regpair;
3033	int ret;
3034	enum wmi_dfs_region wmi_dfs_reg;
3035	enum nl80211_dfs_regions nl_dfs_reg;
3036
3037	lockdep_assert_held(&ar->conf_mutex);
3038
3039	ret = ath10k_update_channel_list(ar);
3040	if (ret)
3041		ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3042
3043	regpair = ar->ath_common.regulatory.regpair;
3044
3045	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3046		nl_dfs_reg = ar->dfs_detector->region;
3047		wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3048	} else {
3049		wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3050	}
3051
3052	/* The target allows setting up a per-band regdomain but ath_common
3053	 * provides only a combined one */
3054	ret = ath10k_wmi_pdev_set_regdomain(ar,
3055					    regpair->reg_domain,
3056					    regpair->reg_domain, /* 2ghz */
3057					    regpair->reg_domain, /* 5ghz */
3058					    regpair->reg_2ghz_ctl,
3059					    regpair->reg_5ghz_ctl,
3060					    wmi_dfs_reg);
3061	if (ret)
3062		ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3063}
3064
3065static void ath10k_reg_notifier(struct wiphy *wiphy,
3066				struct regulatory_request *request)
3067{
3068	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3069	struct ath10k *ar = hw->priv;
3070	bool result;
3071
3072	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3073
3074	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3075		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3076			   request->dfs_region);
3077		result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3078							  request->dfs_region);
3079		if (!result)
3080			ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3081				    request->dfs_region);
3082	}
3083
3084	mutex_lock(&ar->conf_mutex);
3085	if (ar->state == ATH10K_STATE_ON)
3086		ath10k_regd_update(ar);
3087	mutex_unlock(&ar->conf_mutex);
3088}
3089
3090/***************/
3091/* TX handlers */
3092/***************/
3093
3094enum ath10k_mac_tx_path {
3095	ATH10K_MAC_TX_HTT,
3096	ATH10K_MAC_TX_HTT_MGMT,
3097	ATH10K_MAC_TX_WMI_MGMT,
3098	ATH10K_MAC_TX_UNKNOWN,
3099};
3100
3101void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3102{
3103	lockdep_assert_held(&ar->htt.tx_lock);
3104
3105	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3106	ar->tx_paused |= BIT(reason);
3107	ieee80211_stop_queues(ar->hw);
3108}
3109
3110static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3111				      struct ieee80211_vif *vif)
3112{
3113	struct ath10k *ar = data;
3114	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3115
3116	if (arvif->tx_paused)
3117		return;
3118
3119	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3120}
3121
3122void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3123{
3124	lockdep_assert_held(&ar->htt.tx_lock);
3125
3126	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3127	ar->tx_paused &= ~BIT(reason);
3128
3129	if (ar->tx_paused)
3130		return;
3131
3132	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3133						   IEEE80211_IFACE_ITER_RESUME_ALL,
3134						   ath10k_mac_tx_unlock_iter,
3135						   ar);
3136
3137	ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3138}
3139
3140void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3141{
3142	struct ath10k *ar = arvif->ar;
3143
3144	lockdep_assert_held(&ar->htt.tx_lock);
3145
3146	WARN_ON(reason >= BITS_PER_LONG);
3147	arvif->tx_paused |= BIT(reason);
3148	ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3149}
3150
3151void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3152{
3153	struct ath10k *ar = arvif->ar;
3154
3155	lockdep_assert_held(&ar->htt.tx_lock);
3156
3157	WARN_ON(reason >= BITS_PER_LONG);
3158	arvif->tx_paused &= ~BIT(reason);
3159
3160	if (ar->tx_paused)
3161		return;
3162
3163	if (arvif->tx_paused)
3164		return;
3165
3166	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3167}
3168
3169static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3170					   enum wmi_tlv_tx_pause_id pause_id,
3171					   enum wmi_tlv_tx_pause_action action)
3172{
3173	struct ath10k *ar = arvif->ar;
3174
3175	lockdep_assert_held(&ar->htt.tx_lock);
3176
3177	switch (action) {
3178	case WMI_TLV_TX_PAUSE_ACTION_STOP:
3179		ath10k_mac_vif_tx_lock(arvif, pause_id);
3180		break;
3181	case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3182		ath10k_mac_vif_tx_unlock(arvif, pause_id);
3183		break;
3184	default:
3185		ath10k_dbg(ar, ATH10K_DBG_BOOT,
3186			   "received unknown tx pause action %d on vdev %i, ignoring\n",
3187			    action, arvif->vdev_id);
3188		break;
3189	}
3190}
3191
3192struct ath10k_mac_tx_pause {
3193	u32 vdev_id;
3194	enum wmi_tlv_tx_pause_id pause_id;
3195	enum wmi_tlv_tx_pause_action action;
3196};
3197
3198static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3199					    struct ieee80211_vif *vif)
3200{
3201	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3202	struct ath10k_mac_tx_pause *arg = data;
3203
3204	if (arvif->vdev_id != arg->vdev_id)
3205		return;
3206
3207	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3208}
3209
3210void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3211				     enum wmi_tlv_tx_pause_id pause_id,
3212				     enum wmi_tlv_tx_pause_action action)
3213{
3214	struct ath10k_mac_tx_pause arg = {
3215		.vdev_id = vdev_id,
3216		.pause_id = pause_id,
3217		.action = action,
3218	};
3219
3220	spin_lock_bh(&ar->htt.tx_lock);
3221	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3222						   IEEE80211_IFACE_ITER_RESUME_ALL,
3223						   ath10k_mac_handle_tx_pause_iter,
3224						   &arg);
3225	spin_unlock_bh(&ar->htt.tx_lock);
3226}
3227
3228static enum ath10k_hw_txrx_mode
3229ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3230			   struct ieee80211_vif *vif,
3231			   struct ieee80211_sta *sta,
3232			   struct sk_buff *skb)
3233{
3234	const struct ieee80211_hdr *hdr = (void *)skb->data;
3235	__le16 fc = hdr->frame_control;
3236
3237	if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3238		return ATH10K_HW_TXRX_RAW;
3239
3240	if (ieee80211_is_mgmt(fc))
3241		return ATH10K_HW_TXRX_MGMT;
3242
3243	/* Workaround:
3244	 *
3245	 * NullFunc frames are mostly used to check whether a client or AP is
3246	 * still reachable and responsive. This implies tx status reports must
3247	 * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
3248	 * may conclude that the other end has disappeared and tear down the
3249	 * BSS connection, or may never disconnect from the BSS/client (which
3250	 * is what happens here).
3251	 *
3252	 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3253	 * NullFunc frames to the driver. However there's an HTT Mgmt Tx command
3254	 * which seems to deliver correct tx reports for NullFunc frames. The
3255	 * downside of using it is that it ignores client powersave state, so it
3256	 * can end up disconnecting sleeping clients in AP mode. It should fix
3257	 * STA mode though because APs don't sleep.
3258	 */
3259	if (ar->htt.target_version_major < 3 &&
3260	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3261	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3262		      ar->running_fw->fw_file.fw_features))
3263		return ATH10K_HW_TXRX_MGMT;
3264
3265	/* Workaround:
3266	 *
3267	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3268	 * NativeWifi txmode - it selects AP key instead of peer key. It seems
3269	 * to work with Ethernet txmode so use it.
3270	 *
3271	 * FIXME: Check if raw mode works with TDLS.
3272	 */
3273	if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3274		return ATH10K_HW_TXRX_ETHERNET;
3275
3276	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3277		return ATH10K_HW_TXRX_RAW;
3278
3279	return ATH10K_HW_TXRX_NATIVE_WIFI;
3280}
3281
3282static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3283				     struct sk_buff *skb)
3284{
3285	const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3286	const struct ieee80211_hdr *hdr = (void *)skb->data;
3287	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3288			 IEEE80211_TX_CTL_INJECTED;
3289
3290	if (!ieee80211_has_protected(hdr->frame_control))
3291		return false;
3292
3293	if ((info->flags & mask) == mask)
3294		return false;
3295
3296	if (vif)
3297		return !ath10k_vif_to_arvif(vif)->nohwcrypt;
3298
3299	return true;
3300}
3301
3302/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3303 * Control in the header.
3304 */
3305static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3306{
3307	struct ieee80211_hdr *hdr = (void *)skb->data;
3308	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3309	u8 *qos_ctl;
3310
3311	if (!ieee80211_is_data_qos(hdr->frame_control))
3312		return;
3313
3314	qos_ctl = ieee80211_get_qos_ctl(hdr);
3315	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3316		skb->data, (void *)qos_ctl - (void *)skb->data);
3317	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3318
3319	/* Some firmware revisions don't handle sending QoS NullFunc well.
3320	 * These frames are mainly used for CQM purposes so it doesn't really
3321	 * matter whether a QoS NullFunc or a plain NullFunc is sent.
3322	 */
3323	hdr = (void *)skb->data;
3324	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3325		cb->flags &= ~ATH10K_SKB_F_QOS;
3326
3327	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3328}
3329
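/* Convert an 802.11 + LLC/SNAP (RFC 1042) frame into an Ethernet II frame
 * for the Ethernet txmode: strip the 802.11 and SNAP headers and rebuild an
 * ethhdr from the original DA/SA and the SNAP ethertype.
 */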
3330static void ath10k_tx_h_8023(struct sk_buff *skb)
3331{
3332	struct ieee80211_hdr *hdr;
3333	struct rfc1042_hdr *rfc1042;
3334	struct ethhdr *eth;
3335	size_t hdrlen;
3336	u8 da[ETH_ALEN];
3337	u8 sa[ETH_ALEN];
3338	__be16 type;
3339
3340	hdr = (void *)skb->data;
3341	hdrlen = ieee80211_hdrlen(hdr->frame_control);
3342	rfc1042 = (void *)skb->data + hdrlen;
3343
3344	ether_addr_copy(da, ieee80211_get_DA(hdr));
3345	ether_addr_copy(sa, ieee80211_get_SA(hdr));
3346	type = rfc1042->snap_type;
3347
3348	skb_pull(skb, hdrlen + sizeof(*rfc1042));
3349	skb_push(skb, sizeof(*eth));
3350
3351	eth = (void *)skb->data;
3352	ether_addr_copy(eth->h_dest, da);
3353	ether_addr_copy(eth->h_source, sa);
3354	eth->h_proto = type;
3355}
3356
3357static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3358				       struct ieee80211_vif *vif,
3359				       struct sk_buff *skb)
3360{
3361	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3362	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3363
3364	/* This only applies to a P2P GO */
3365	if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3366		return;
3367
3368	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3369		spin_lock_bh(&ar->data_lock);
3370		if (arvif->u.ap.noa_data)
3371			if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3372					      GFP_ATOMIC))
3373				memcpy(skb_put(skb, arvif->u.ap.noa_len),
3374				       arvif->u.ap.noa_data,
3375				       arvif->u.ap.noa_len);
3376		spin_unlock_bh(&ar->data_lock);
3377	}
3378}
3379
3380static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3381				    struct ieee80211_vif *vif,
3382				    struct ieee80211_txq *txq,
3383				    struct sk_buff *skb)
3384{
3385	struct ieee80211_hdr *hdr = (void *)skb->data;
3386	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3387
3388	cb->flags = 0;
3389	if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3390		cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3391
3392	if (ieee80211_is_mgmt(hdr->frame_control))
3393		cb->flags |= ATH10K_SKB_F_MGMT;
3394
3395	if (ieee80211_is_data_qos(hdr->frame_control))
3396		cb->flags |= ATH10K_SKB_F_QOS;
3397
3398	cb->vif = vif;
3399	cb->txq = txq;
3400}
3401
3402bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3403{
3404	/* FIXME: It is not clear exactly when the behaviour changed. At some
3405	 * point new firmware stopped requiring creation of peer entries for
3406	 * offchannel tx (and actually creating them causes issues with wmi-htc
3407	 * tx credit replenishment and reliability). Assume it's at least 3.4,
3408	 * since that's when the `freq` field was added to the TX_FRM HTT command.
3409	 */
3410	return (ar->htt.target_version_major >= 3 &&
3411		ar->htt.target_version_minor >= 4 &&
3412		ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3413}
3414
3415static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3416{
3417	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3418	int ret = 0;
3419
3420	spin_lock_bh(&ar->data_lock);
3421
3422	if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3423		ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3424		ret = -ENOSPC;
3425		goto unlock;
3426	}
3427
3428	__skb_queue_tail(q, skb);
3429	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3430
3431unlock:
3432	spin_unlock_bh(&ar->data_lock);
3433
3434	return ret;
3435}
3436
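/* Pick the delivery path for a frame: raw, native-wifi and Ethernet txmodes
 * go over HTT data tx, while the mgmt txmode goes over WMI when the firmware
 * advertises WMI mgmt tx, over regular HTT tx on HTT >= 3.0, or over the
 * HTT mgmt tx command otherwise.
 */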
3437static enum ath10k_mac_tx_path
3438ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3439			   struct sk_buff *skb,
3440			   enum ath10k_hw_txrx_mode txmode)
3441{
3442	switch (txmode) {
3443	case ATH10K_HW_TXRX_RAW:
3444	case ATH10K_HW_TXRX_NATIVE_WIFI:
3445	case ATH10K_HW_TXRX_ETHERNET:
3446		return ATH10K_MAC_TX_HTT;
3447	case ATH10K_HW_TXRX_MGMT:
3448		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3449			     ar->running_fw->fw_file.fw_features))
3450			return ATH10K_MAC_TX_WMI_MGMT;
3451		else if (ar->htt.target_version_major >= 3)
3452			return ATH10K_MAC_TX_HTT;
3453		else
3454			return ATH10K_MAC_TX_HTT_MGMT;
3455	}
3456
3457	return ATH10K_MAC_TX_UNKNOWN;
3458}
3459
3460static int ath10k_mac_tx_submit(struct ath10k *ar,
3461				enum ath10k_hw_txrx_mode txmode,
3462				enum ath10k_mac_tx_path txpath,
3463				struct sk_buff *skb)
3464{
3465	struct ath10k_htt *htt = &ar->htt;
3466	int ret = -EINVAL;
3467
3468	switch (txpath) {
3469	case ATH10K_MAC_TX_HTT:
3470		ret = ath10k_htt_tx(htt, txmode, skb);
3471		break;
3472	case ATH10K_MAC_TX_HTT_MGMT:
3473		ret = ath10k_htt_mgmt_tx(htt, skb);
3474		break;
3475	case ATH10K_MAC_TX_WMI_MGMT:
3476		ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3477		break;
3478	case ATH10K_MAC_TX_UNKNOWN:
3479		WARN_ON_ONCE(1);
3480		ret = -EINVAL;
3481		break;
3482	}
3483
3484	if (ret) {
3485		ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3486			    ret);
3487		ieee80211_free_txskb(ar->hw, skb);
3488	}
3489
3490	return ret;
3491}
3492
3493/* This function consumes the sk_buff regardless of return value as far as
3494 * caller is concerned so no freeing is necessary afterwards.
3495 */
3496static int ath10k_mac_tx(struct ath10k *ar,
3497			 struct ieee80211_vif *vif,
3498			 struct ieee80211_sta *sta,
3499			 enum ath10k_hw_txrx_mode txmode,
3500			 enum ath10k_mac_tx_path txpath,
3501			 struct sk_buff *skb)
3502{
3503	struct ieee80211_hw *hw = ar->hw;
3504	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3505	int ret;
3506
3507	/* mac80211 asks us to avoid CCK rates here (P2P); currently only logged */
3508	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3509		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3510
3511	switch (txmode) {
3512	case ATH10K_HW_TXRX_MGMT:
3513	case ATH10K_HW_TXRX_NATIVE_WIFI:
3514		ath10k_tx_h_nwifi(hw, skb);
3515		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3516		ath10k_tx_h_seq_no(vif, skb);
3517		break;
3518	case ATH10K_HW_TXRX_ETHERNET:
3519		ath10k_tx_h_8023(skb);
3520		break;
3521	case ATH10K_HW_TXRX_RAW:
3522		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3523			WARN_ON_ONCE(1);
3524			ieee80211_free_txskb(hw, skb);
3525			return -ENOTSUPP;
3526		}
3527	}
3528
3529	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3530		if (!ath10k_mac_tx_frm_has_freq(ar)) {
3531			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
3532				   skb);
3533
3534			skb_queue_tail(&ar->offchan_tx_queue, skb);
3535			ieee80211_queue_work(hw, &ar->offchan_tx_work);
3536			return 0;
3537		}
3538	}
3539
3540	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3541	if (ret) {
3542		ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3543		return ret;
3544	}
3545
3546	return 0;
3547}
3548
3549void ath10k_offchan_tx_purge(struct ath10k *ar)
3550{
3551	struct sk_buff *skb;
3552
3553	for (;;) {
3554		skb = skb_dequeue(&ar->offchan_tx_queue);
3555		if (!skb)
3556			break;
3557
3558		ieee80211_free_txskb(ar->hw, skb);
3559	}
3560}
3561
3562void ath10k_offchan_tx_work(struct work_struct *work)
3563{
3564	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3565	struct ath10k_peer *peer;
3566	struct ath10k_vif *arvif;
3567	enum ath10k_hw_txrx_mode txmode;
3568	enum ath10k_mac_tx_path txpath;
3569	struct ieee80211_hdr *hdr;
3570	struct ieee80211_vif *vif;
3571	struct ieee80211_sta *sta;
3572	struct sk_buff *skb;
3573	const u8 *peer_addr;
3574	int vdev_id;
3575	int ret;
3576	unsigned long time_left;
3577	bool tmp_peer_created = false;
3578
3579	/* FW requirement: We must create a peer before the FW will send out
3580	 * an offchannel frame. Otherwise the frame will be stuck and
3581	 * never transmitted. We delete the peer upon tx completion.
3582	 * It is unlikely that a peer for offchannel tx will already be
3583	 * present. However it may be present in some rare cases, so account for
3584	 * that; otherwise we might remove a legitimate peer and break things. */
3585
3586	for (;;) {
3587		skb = skb_dequeue(&ar->offchan_tx_queue);
3588		if (!skb)
3589			break;
3590
3591		mutex_lock(&ar->conf_mutex);
3592
3593		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
3594			   skb);
3595
3596		hdr = (struct ieee80211_hdr *)skb->data;
3597		peer_addr = ieee80211_get_DA(hdr);
3598
3599		spin_lock_bh(&ar->data_lock);
3600		vdev_id = ar->scan.vdev_id;
3601		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3602		spin_unlock_bh(&ar->data_lock);
3603
3604		if (peer)
3605			/* FIXME: should this use ath10k_warn()? */
3606			ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3607				   peer_addr, vdev_id);
3608
3609		if (!peer) {
3610			ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3611						 peer_addr,
3612						 WMI_PEER_TYPE_DEFAULT);
3613			if (ret)
3614				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3615					    peer_addr, vdev_id, ret);
3616			tmp_peer_created = (ret == 0);
3617		}
3618
3619		spin_lock_bh(&ar->data_lock);
3620		reinit_completion(&ar->offchan_tx_completed);
3621		ar->offchan_tx_skb = skb;
3622		spin_unlock_bh(&ar->data_lock);
3623
3624		/* It's safe to access vif and sta - conf_mutex guarantees that
3625	 * sta_state() and remove_interface() are exclusively locked out
3626	 * with respect to this offchannel worker.
3627		 */
3628		arvif = ath10k_get_arvif(ar, vdev_id);
3629		if (arvif) {
3630			vif = arvif->vif;
3631			sta = ieee80211_find_sta(vif, peer_addr);
3632		} else {
3633			vif = NULL;
3634			sta = NULL;
3635		}
3636
3637		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3638		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3639
3640		ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3641		if (ret) {
3642			ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3643				    ret);
3644			/* not serious */
3645		}
3646
3647		time_left =
3648		wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3649		if (time_left == 0)
3650			ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
3651				    skb);
3652
3653		if (!peer && tmp_peer_created) {
3654			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3655			if (ret)
3656				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3657					    peer_addr, vdev_id, ret);
3658		}
3659
3660		mutex_unlock(&ar->conf_mutex);
3661	}
3662}
3663
3664void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3665{
3666	struct sk_buff *skb;
3667
3668	for (;;) {
3669		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3670		if (!skb)
3671			break;
3672
3673		ieee80211_free_txskb(ar->hw, skb);
3674	}
3675}
3676
3677void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3678{
3679	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3680	struct sk_buff *skb;
3681	int ret;
3682
3683	for (;;) {
3684		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3685		if (!skb)
3686			break;
3687
3688		ret = ath10k_wmi_mgmt_tx(ar, skb);
3689		if (ret) {
3690			ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3691				    ret);
3692			ieee80211_free_txskb(ar->hw, skb);
3693		}
3694	}
3695}
3696
3697static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3698{
3699	struct ath10k_txq *artxq;
3700
3701	if (!txq)
3702		return;
3703
3704	artxq = (void *)txq->drv_priv;
3705	INIT_LIST_HEAD(&artxq->list);
3706}
3707
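/* Detach a txq that is going away: drop it from the scheduling list and
 * clear any dangling cb->txq references on frames still pending in HTT so
 * that completions don't touch stale state.
 */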
3708static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3709{
3710	struct ath10k_txq *artxq;
3711	struct ath10k_skb_cb *cb;
3712	struct sk_buff *msdu;
3713	int msdu_id;
3714
3715	if (!txq)
3716		return;
3717
3718	artxq = (void *)txq->drv_priv;
3719	spin_lock_bh(&ar->txqs_lock);
3720	if (!list_empty(&artxq->list))
3721		list_del_init(&artxq->list);
3722	spin_unlock_bh(&ar->txqs_lock);
3723
3724	spin_lock_bh(&ar->htt.tx_lock);
3725	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3726		cb = ATH10K_SKB_CB(msdu);
3727		if (cb->txq == txq)
3728			cb->txq = NULL;
3729	}
3730	spin_unlock_bh(&ar->htt.tx_lock);
3731}
3732
3733struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3734					    u16 peer_id,
3735					    u8 tid)
3736{
3737	struct ath10k_peer *peer;
3738
3739	lockdep_assert_held(&ar->data_lock);
3740
3741	peer = ar->peer_map[peer_id];
3742	if (!peer)
3743		return NULL;
3744
3745	if (peer->sta)
3746		return peer->sta->txq[tid];
3747	else if (peer->vif)
3748		return peer->vif->txq;
3749	else
3750		return NULL;
3751}
3752
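/* Decide whether more frames may be pushed for this txq based on the
 * global HTT push mode, the host-wide pending-tx budget and the per-txq
 * firmware queue depth limit.
 */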
3753static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3754				   struct ieee80211_txq *txq)
3755{
3756	struct ath10k *ar = hw->priv;
3757	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3758
3759	/* No need to get locks */
3760
3761	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3762		return true;
3763
3764	if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3765		return true;
3766
3767	if (artxq->num_fw_queued < artxq->num_push_allowed)
3768		return true;
3769
3770	return false;
3771}
3772
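/* Dequeue one frame from the given txq and hand it to the tx path, keeping
 * the HTT pending counters (and, for HTT mgmt frames, the separate mgmt
 * pending counter) balanced on every failure path. Returns the pushed
 * frame's length or a negative error code.
 */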
3773int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3774			   struct ieee80211_txq *txq)
3775{
3776	struct ath10k *ar = hw->priv;
3777	struct ath10k_htt *htt = &ar->htt;
3778	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3779	struct ieee80211_vif *vif = txq->vif;
3780	struct ieee80211_sta *sta = txq->sta;
3781	enum ath10k_hw_txrx_mode txmode;
3782	enum ath10k_mac_tx_path txpath;
3783	struct sk_buff *skb;
3784	struct ieee80211_hdr *hdr;
3785	size_t skb_len;
3786	bool is_mgmt, is_presp;
3787	int ret;
3788
3789	spin_lock_bh(&ar->htt.tx_lock);
3790	ret = ath10k_htt_tx_inc_pending(htt);
3791	spin_unlock_bh(&ar->htt.tx_lock);
3792
3793	if (ret)
3794		return ret;
3795
3796	skb = ieee80211_tx_dequeue(hw, txq);
3797	if (!skb) {
3798		spin_lock_bh(&ar->htt.tx_lock);
3799		ath10k_htt_tx_dec_pending(htt);
3800		spin_unlock_bh(&ar->htt.tx_lock);
3801
3802		return -ENOENT;
3803	}
3804
3805	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3806
3807	skb_len = skb->len;
3808	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3809	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3810	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
3811
3812	if (is_mgmt) {
3813		hdr = (struct ieee80211_hdr *)skb->data;
3814		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
3815
3816		spin_lock_bh(&ar->htt.tx_lock);
3817		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
3818
3819		if (ret) {
3820			ath10k_htt_tx_dec_pending(htt);
3821			spin_unlock_bh(&ar->htt.tx_lock);
3822			return ret;
3823		}
3824		spin_unlock_bh(&ar->htt.tx_lock);
3825	}
3826
3827	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3828	if (unlikely(ret)) {
3829		ath10k_warn(ar, "failed to push frame: %d\n", ret);
3830
3831		spin_lock_bh(&ar->htt.tx_lock);
3832		ath10k_htt_tx_dec_pending(htt);
3833		if (is_mgmt)
3834			ath10k_htt_tx_mgmt_dec_pending(htt);
3835		spin_unlock_bh(&ar->htt.tx_lock);
3836
3837		return ret;
3838	}
3839
3840	spin_lock_bh(&ar->htt.tx_lock);
3841	artxq->num_fw_queued++;
3842	spin_unlock_bh(&ar->htt.tx_lock);
3843
3844	return skb_len;
3845}
3846
3847void ath10k_mac_tx_push_pending(struct ath10k *ar)
3848{
3849	struct ieee80211_hw *hw = ar->hw;
3850	struct ieee80211_txq *txq;
3851	struct ath10k_txq *artxq;
3852	struct ath10k_txq *last;
3853	int ret;
3854	int max;
3855
3856	if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
3857		return;
3858
3859	spin_lock_bh(&ar->txqs_lock);
3860	rcu_read_lock();
3861
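	/* Service the queued txqs round-robin: each txq gets a bounded push
	 * budget and is re-appended to the tail unless it ran empty (-ENOENT).
	 * Stop once the snapshot tail has been serviced or on a hard error.
	 */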
3862	last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
3863	while (!list_empty(&ar->txqs)) {
3864		artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3865		txq = container_of((void *)artxq, struct ieee80211_txq,
3866				   drv_priv);
3867
3868		/* Prevent an aggressive sta/tid from taking over the tx queue */
3869		max = 16;
3870		ret = 0;
3871		while (ath10k_mac_tx_can_push(hw, txq) && max--) {
3872			ret = ath10k_mac_tx_push_txq(hw, txq);
3873			if (ret < 0)
3874				break;
3875		}
3876
3877		list_del_init(&artxq->list);
3878		if (ret != -ENOENT)
3879			list_add_tail(&artxq->list, &ar->txqs);
3880
3881		ath10k_htt_tx_txq_update(hw, txq);
3882
3883		if (artxq == last || (ret < 0 && ret != -ENOENT))
3884			break;
3885	}
3886
3887	rcu_read_unlock();
3888	spin_unlock_bh(&ar->txqs_lock);
3889}
3890
3891/************/
3892/* Scanning */
3893/************/
3894
3895void __ath10k_scan_finish(struct ath10k *ar)
3896{
3897	lockdep_assert_held(&ar->data_lock);
3898
3899	switch (ar->scan.state) {
3900	case ATH10K_SCAN_IDLE:
3901		break;
3902	case ATH10K_SCAN_RUNNING:
3903	case ATH10K_SCAN_ABORTING:
3904		if (!ar->scan.is_roc) {
3905			struct cfg80211_scan_info info = {
3906				.aborted = (ar->scan.state ==
3907					    ATH10K_SCAN_ABORTING),
3908			};
3909
3910			ieee80211_scan_completed(ar->hw, &info);
3911		} else if (ar->scan.roc_notify) {
3912			ieee80211_remain_on_channel_expired(ar->hw);
3913		}
3914		/* fall through */
3915	case ATH10K_SCAN_STARTING:
3916		ar->scan.state = ATH10K_SCAN_IDLE;
3917		ar->scan_channel = NULL;
3918		ar->scan.roc_freq = 0;
3919		ath10k_offchan_tx_purge(ar);
3920		cancel_delayed_work(&ar->scan.timeout);
3921		complete(&ar->scan.completed);
3922		break;
3923	}
3924}
3925
3926void ath10k_scan_finish(struct ath10k *ar)
3927{
3928	spin_lock_bh(&ar->data_lock);
3929	__ath10k_scan_finish(ar);
3930	spin_unlock_bh(&ar->data_lock);
3931}
3932
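/* Ask firmware to stop the ongoing scan and wait up to three seconds for
 * the completion event. The scan state is cleaned up even if the event
 * never arrives (see the comment at the out label).
 */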
3933static int ath10k_scan_stop(struct ath10k *ar)
3934{
3935	struct wmi_stop_scan_arg arg = {
3936		.req_id = 1, /* FIXME */
3937		.req_type = WMI_SCAN_STOP_ONE,
3938		.u.scan_id = ATH10K_SCAN_ID,
3939	};
3940	int ret;
3941
3942	lockdep_assert_held(&ar->conf_mutex);
3943
3944	ret = ath10k_wmi_stop_scan(ar, &arg);
3945	if (ret) {
3946		ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
3947		goto out;
3948	}
3949
3950	ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
3951	if (ret == 0) {
3952		ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
3953		ret = -ETIMEDOUT;
3954	} else if (ret > 0) {
3955		ret = 0;
3956	}
3957
3958out:
3959	/* Scan state should be updated upon scan completion, but in case
3960	 * firmware fails to deliver the event (for whatever reason) it is
3961	 * desirable to clean up the scan state anyway. Firmware may have just
3962	 * dropped the scan completion event delivery because the transport
3963	 * pipe was overrun with data, and/or it can recover on its own before
3964	 * the next scan request is submitted.
3965	 */
3966	spin_lock_bh(&ar->data_lock);
3967	if (ar->scan.state != ATH10K_SCAN_IDLE)
3968		__ath10k_scan_finish(ar);
3969	spin_unlock_bh(&ar->data_lock);
3970
3971	return ret;
3972}
3973
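/* Abort a running scan. Only the RUNNING state is acted upon: it is moved
 * to ABORTING and firmware is asked to stop the scan.
 */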
3974static void ath10k_scan_abort(struct ath10k *ar)
3975{
3976	int ret;
3977
3978	lockdep_assert_held(&ar->conf_mutex);
3979
3980	spin_lock_bh(&ar->data_lock);
3981
3982	switch (ar->scan.state) {
3983	case ATH10K_SCAN_IDLE:
3984		/* This can happen if timeout worker kicked in and called
3985		 * abortion while scan completion was being processed.
3986		 */
3987		break;
3988	case ATH10K_SCAN_STARTING:
3989	case ATH10K_SCAN_ABORTING:
3990		ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
3991			    ath10k_scan_state_str(ar->scan.state),
3992			    ar->scan.state);
3993		break;
3994	case ATH10K_SCAN_RUNNING:
3995		ar->scan.state = ATH10K_SCAN_ABORTING;
3996		spin_unlock_bh(&ar->data_lock);
3997
3998		ret = ath10k_scan_stop(ar);
3999		if (ret)
4000			ath10k_warn(ar, "failed to abort scan: %d\n", ret);
4001
4002		spin_lock_bh(&ar->data_lock);
4003		break;
4004	}
4005
4006	spin_unlock_bh(&ar->data_lock);
4007}
4008
4009void ath10k_scan_timeout_work(struct work_struct *work)
4010{
4011	struct ath10k *ar = container_of(work, struct ath10k,
4012					 scan.timeout.work);
4013
4014	mutex_lock(&ar->conf_mutex);
4015	ath10k_scan_abort(ar);
4016	mutex_unlock(&ar->conf_mutex);
4017}
4018
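/* Submit a WMI start-scan command and wait up to one second for the
 * "scan started" event. If the event does not arrive, the scan is stopped
 * again and -ETIMEDOUT is returned.
 */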
4019static int ath10k_start_scan(struct ath10k *ar,
4020			     const struct wmi_start_scan_arg *arg)
4021{
4022	int ret;
4023
4024	lockdep_assert_held(&ar->conf_mutex);
4025
4026	ret = ath10k_wmi_start_scan(ar, arg);
4027	if (ret)
4028		return ret;
4029
4030	ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
4031	if (ret == 0) {
4032		ret = ath10k_scan_stop(ar);
4033		if (ret)
4034			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
4035
4036		return -ETIMEDOUT;
4037	}
4038
4039	/* If we failed to start the scan, return the error code at
4040	 * this point.  This is probably due to some issue in the
4041	 * firmware, but there is no need to wedge the driver due to that...
4042	 */
4043	spin_lock_bh(&ar->data_lock);
4044	if (ar->scan.state == ATH10K_SCAN_IDLE) {
4045		spin_unlock_bh(&ar->data_lock);
4046		return -EINVAL;
4047	}
4048	spin_unlock_bh(&ar->data_lock);
4049
4050	return 0;
4051}
4052
4053/**********************/
4054/* mac80211 callbacks */
4055/**********************/
4056
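/* mac80211 .tx callback. For frames going over HTT the pending tx counters
 * are charged up front (dropping the frame if that fails) and rolled back
 * if the push into firmware fails.
 */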
4057static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4058			     struct ieee80211_tx_control *control,
4059			     struct sk_buff *skb)
4060{
4061	struct ath10k *ar = hw->priv;
4062	struct ath10k_htt *htt = &ar->htt;
4063	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4064	struct ieee80211_vif *vif = info->control.vif;
4065	struct ieee80211_sta *sta = control->sta;
4066	struct ieee80211_txq *txq = NULL;
4067	struct ieee80211_hdr *hdr = (void *)skb->data;
4068	enum ath10k_hw_txrx_mode txmode;
4069	enum ath10k_mac_tx_path txpath;
4070	bool is_htt;
4071	bool is_mgmt;
4072	bool is_presp;
4073	int ret;
4074
4075	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
4076
4077	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4078	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4079	is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4080		  txpath == ATH10K_MAC_TX_HTT_MGMT);
4081	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4082
4083	if (is_htt) {
4084		spin_lock_bh(&ar->htt.tx_lock);
4085		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4086
4087		ret = ath10k_htt_tx_inc_pending(htt);
4088		if (ret) {
4089			ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4090				    ret);
4091			spin_unlock_bh(&ar->htt.tx_lock);
4092			ieee80211_free_txskb(ar->hw, skb);
4093			return;
4094		}
4095
4096		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4097		if (ret) {
4098			ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4099				   ret);
4100			ath10k_htt_tx_dec_pending(htt);
4101			spin_unlock_bh(&ar->htt.tx_lock);
4102			ieee80211_free_txskb(ar->hw, skb);
4103			return;
4104		}
4105		spin_unlock_bh(&ar->htt.tx_lock);
4106	}
4107
4108	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
4109	if (ret) {
4110		ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4111		if (is_htt) {
4112			spin_lock_bh(&ar->htt.tx_lock);
4113			ath10k_htt_tx_dec_pending(htt);
4114			if (is_mgmt)
4115				ath10k_htt_tx_mgmt_dec_pending(htt);
4116			spin_unlock_bh(&ar->htt.tx_lock);
4117		}
4118		return;
4119	}
4120}
4121
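/* mac80211 .wake_tx_queue callback. The woken queue is linked into
 * ar->txqs and the queue at the head of that list is serviced, pushing at
 * most 16 frames in one go.
 */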
4122static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4123					struct ieee80211_txq *txq)
4124{
4125	struct ath10k *ar = hw->priv;
4126	struct ath10k_txq *artxq = (void *)txq->drv_priv;
4127	struct ieee80211_txq *f_txq;
4128	struct ath10k_txq *f_artxq;
4129	int ret = 0;
4130	int max = 16;
4131
4132	spin_lock_bh(&ar->txqs_lock);
4133	if (list_empty(&artxq->list))
4134		list_add_tail(&artxq->list, &ar->txqs);
4135
4136	f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
4137	f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
4138	list_del_init(&f_artxq->list);
4139
4140	while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
4141		ret = ath10k_mac_tx_push_txq(hw, f_txq);
4142		if (ret)
4143			break;
4144	}
4145	if (ret != -ENOENT)
4146		list_add_tail(&f_artxq->list, &ar->txqs);
4147	spin_unlock_bh(&ar->txqs_lock);
4148
4149	ath10k_htt_tx_txq_update(hw, f_txq);
4150	ath10k_htt_tx_txq_update(hw, txq);
4151}
4152
4153/* Must not be called with conf_mutex held as workers can use that also. */
4154void ath10k_drain_tx(struct ath10k *ar)
4155{
4156	/* make sure rcu-protected mac80211 tx path itself is drained */
4157	synchronize_net();
4158
4159	ath10k_offchan_tx_purge(ar);
4160	ath10k_mgmt_over_wmi_tx_purge(ar);
4161
4162	cancel_work_sync(&ar->offchan_tx_work);
4163	cancel_work_sync(&ar->wmi_mgmt_tx_work);
4164}
4165
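/* Bring the device down: stop the monitor vdev, finish any scan, clean up
 * all peers, stop the core and power down the HIF layer. Per-vif beacon
 * state is cleaned up as well.
 */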
4166void ath10k_halt(struct ath10k *ar)
4167{
4168	struct ath10k_vif *arvif;
4169
4170	lockdep_assert_held(&ar->conf_mutex);
4171
4172	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4173	ar->filter_flags = 0;
4174	ar->monitor = false;
4175	ar->monitor_arvif = NULL;
4176
4177	if (ar->monitor_started)
4178		ath10k_monitor_stop(ar);
4179
4180	ar->monitor_started = false;
4181	ar->tx_paused = 0;
4182
4183	ath10k_scan_finish(ar);
4184	ath10k_peer_cleanup_all(ar);
4185	ath10k_core_stop(ar);
4186	ath10k_hif_power_down(ar);
4187
4188	spin_lock_bh(&ar->data_lock);
4189	list_for_each_entry(arvif, &ar->arvifs, list)
4190		ath10k_mac_vif_beacon_cleanup(arvif);
4191	spin_unlock_bh(&ar->data_lock);
4192}
4193
4194static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4195{
4196	struct ath10k *ar = hw->priv;
4197
4198	mutex_lock(&ar->conf_mutex);
4199
4200	*tx_ant = ar->cfg_tx_chainmask;
4201	*rx_ant = ar->cfg_rx_chainmask;
4202
4203	mutex_unlock(&ar->conf_mutex);
4204
4205	return 0;
4206}
4207
4208static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4209{
4210	/* It is not clear that allowing gaps in the chainmask
4211	 * is helpful.  It probably will not do what the user
4212	 * is hoping for, so warn in that case.
4213	 */
4214	if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4215		return;
4216
4217	ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x.  Suggested values: 15, 7, 3, 1 or 0.\n",
4218		    dbg, cm);
4219}
4220
4221static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4222{
4223	int nsts = ar->vht_cap_info;
4224
4225	nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4226	nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4227
4228	/* If firmware does not deliver to the host the number of space-time
4229	 * streams supported, assume it supports up to 4 BF STS and return
4230	 * the value expected in the VHT CAP field (nsts - 1).
4231	 */
4232	if (nsts == 0)
4233		return 3;
4234
4235	return nsts;
4236}
4237
4238static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4239{
4240	int sound_dim = ar->vht_cap_info;
4241
4242	sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4243	sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4244
4245	/* If the sounding dimension is not advertised by the firmware,
4246	 * let's use a default value of 1
4247	 */
4248	if (sound_dim == 0)
4249		return 1;
4250
4251	return sound_dim;
4252}
4253
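/* Build the VHT capabilities advertised to mac80211 from the firmware
 * reported vht_cap_info. The MCS map enables MCS 0-9 only on chains that
 * are present in cfg_tx_chainmask.
 */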
4254static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4255{
4256	struct ieee80211_sta_vht_cap vht_cap = {0};
4257	u16 mcs_map;
4258	u32 val;
4259	int i;
4260
4261	vht_cap.vht_supported = 1;
4262	vht_cap.cap = ar->vht_cap_info;
4263
4264	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4265				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4266		val = ath10k_mac_get_vht_cap_bf_sts(ar);
4267		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4268		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4269
4270		vht_cap.cap |= val;
4271	}
4272
4273	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4274				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4275		val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4276		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4277		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4278
4279		vht_cap.cap |= val;
4280	}
4281
4282	mcs_map = 0;
4283	for (i = 0; i < 8; i++) {
4284		if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4285			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4286		else
4287			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4288	}
4289
4290	if (ar->cfg_tx_chainmask <= 1)
4291		vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
4292
4293	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4294	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4295
4296	return vht_cap;
4297}
4298
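/* Build the HT capabilities advertised to mac80211 from the firmware
 * reported ht_cap_info. The rx MCS mask covers one octet per chain
 * enabled in cfg_rx_chainmask.
 */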
4299static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4300{
4301	int i;
4302	struct ieee80211_sta_ht_cap ht_cap = {0};
4303
4304	if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4305		return ht_cap;
4306
4307	ht_cap.ht_supported = 1;
4308	ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4309	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4310	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4311	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4312	ht_cap.cap |=
4313		WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4314
4315	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4316		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4317
4318	if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4319		ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4320
4321	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4322		u32 smps;
4323
4324		smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
4325		smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4326
4327		ht_cap.cap |= smps;
4328	}
4329
4330	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
4331		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4332
4333	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4334		u32 stbc;
4335
4336		stbc   = ar->ht_cap_info;
4337		stbc  &= WMI_HT_CAP_RX_STBC;
4338		stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4339		stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4340		stbc  &= IEEE80211_HT_CAP_RX_STBC;
4341
4342		ht_cap.cap |= stbc;
4343	}
4344
4345	if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4346		ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4347
4348	if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4349		ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4350
4351	/* max AMSDU is implicitly taken from vht_cap_info */
4352	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4353		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4354
4355	for (i = 0; i < ar->num_rf_chains; i++) {
4356		if (ar->cfg_rx_chainmask & BIT(i))
4357			ht_cap.mcs.rx_mask[i] = 0xFF;
4358	}
4359
4360	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4361
4362	return ht_cap;
4363}
4364
4365static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4366{
4367	struct ieee80211_supported_band *band;
4368	struct ieee80211_sta_vht_cap vht_cap;
4369	struct ieee80211_sta_ht_cap ht_cap;
4370
4371	ht_cap = ath10k_get_ht_cap(ar);
4372	vht_cap = ath10k_create_vht_cap(ar);
4373
4374	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4375		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4376		band->ht_cap = ht_cap;
4377	}
4378	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4379		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4380		band->ht_cap = ht_cap;
4381		band->vht_cap = vht_cap;
4382	}
4383}
4384
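/* Store and apply new tx/rx chainmasks. If the device is up the masks are
 * pushed to firmware via pdev parameters and the advertised HT/VHT
 * capabilities are rebuilt to match.
 */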
4385static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4386{
4387	int ret;
4388
4389	lockdep_assert_held(&ar->conf_mutex);
4390
4391	ath10k_check_chain_mask(ar, tx_ant, "tx");
4392	ath10k_check_chain_mask(ar, rx_ant, "rx");
4393
4394	ar->cfg_tx_chainmask = tx_ant;
4395	ar->cfg_rx_chainmask = rx_ant;
4396
4397	if ((ar->state != ATH10K_STATE_ON) &&
4398	    (ar->state != ATH10K_STATE_RESTARTED))
4399		return 0;
4400
4401	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4402					tx_ant);
4403	if (ret) {
4404		ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4405			    ret, tx_ant);
4406		return ret;
4407	}
4408
4409	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4410					rx_ant);
4411	if (ret) {
4412		ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4413			    ret, rx_ant);
4414		return ret;
4415	}
4416
4417	/* Reload HT/VHT capability */
4418	ath10k_mac_setup_ht_vht_cap(ar);
4419
4420	return 0;
4421}
4422
4423static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4424{
4425	struct ath10k *ar = hw->priv;
4426	int ret;
4427
4428	mutex_lock(&ar->conf_mutex);
4429	ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4430	mutex_unlock(&ar->conf_mutex);
4431	return ret;
4432}
4433
4434static int ath10k_start(struct ieee80211_hw *hw)
4435{
4436	struct ath10k *ar = hw->priv;
4437	u32 param;
4438	int ret = 0;
4439
4440	/*
4441	 * This makes sense only when restarting the hw. It is harmless to call
4442	 * unconditionally. It is necessary to make sure no HTT/WMI tx
4443	 * commands are submitted while restarting.
4444	 */
4445	ath10k_drain_tx(ar);
4446
4447	mutex_lock(&ar->conf_mutex);
4448
4449	switch (ar->state) {
4450	case ATH10K_STATE_OFF:
4451		ar->state = ATH10K_STATE_ON;
4452		break;
4453	case ATH10K_STATE_RESTARTING:
4454		ar->state = ATH10K_STATE_RESTARTED;
4455		break;
4456	case ATH10K_STATE_ON:
4457	case ATH10K_STATE_RESTARTED:
4458	case ATH10K_STATE_WEDGED:
4459		WARN_ON(1);
4460		ret = -EINVAL;
4461		goto err;
4462	case ATH10K_STATE_UTF:
4463		ret = -EBUSY;
4464		goto err;
4465	}
4466
4467	ret = ath10k_hif_power_up(ar);
4468	if (ret) {
4469		ath10k_err(ar, "Could not init hif: %d\n", ret);
4470		goto err_off;
4471	}
4472
4473	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4474				&ar->normal_mode_fw);
4475	if (ret) {
4476		ath10k_err(ar, "Could not init core: %d\n", ret);
4477		goto err_power_down;
4478	}
4479
4480	param = ar->wmi.pdev_param->pmf_qos;
4481	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4482	if (ret) {
4483		ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4484		goto err_core_stop;
4485	}
4486
4487	param = ar->wmi.pdev_param->dynamic_bw;
4488	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4489	if (ret) {
4490		ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4491		goto err_core_stop;
4492	}
4493
4494	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4495		ret = ath10k_wmi_adaptive_qcs(ar, true);
4496		if (ret) {
4497			ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4498				    ret);
4499			goto err_core_stop;
4500		}
4501	}
4502
4503	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4504		param = ar->wmi.pdev_param->burst_enable;
4505		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4506		if (ret) {
4507			ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4508			goto err_core_stop;
4509		}
4510	}
4511
4512	__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4513
4514	/*
4515	 * By default the FW sets the ARP frames' access category to voice (6).
4516	 * In that case the ARP exchange does not work properly with a
4517	 * UAPSD-enabled AP. ARP requests which arrive with access category 0
4518	 * are processed by the network stack and sent back with access
4519	 * category 0, but the FW changes the access category to 6. Setting the
4520	 * ARP frames' access category to best effort (0) solves this problem.
4521	 */
4522
4523	param = ar->wmi.pdev_param->arp_ac_override;
4524	ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4525	if (ret) {
4526		ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4527			    ret);
4528		goto err_core_stop;
4529	}
4530
4531	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4532		     ar->running_fw->fw_file.fw_features)) {
4533		ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4534							  WMI_CCA_DETECT_LEVEL_AUTO,
4535							  WMI_CCA_DETECT_MARGIN_AUTO);
4536		if (ret) {
4537			ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4538				    ret);
4539			goto err_core_stop;
4540		}
4541	}
4542
4543	param = ar->wmi.pdev_param->ani_enable;
4544	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4545	if (ret) {
4546		ath10k_warn(ar, "failed to enable ani by default: %d\n",
4547			    ret);
4548		goto err_core_stop;
4549	}
4550
4551	ar->ani_enabled = true;
4552
4553	if (ath10k_peer_stats_enabled(ar)) {
4554		param = ar->wmi.pdev_param->peer_stats_update_period;
4555		ret = ath10k_wmi_pdev_set_param(ar, param,
4556						PEER_DEFAULT_STATS_UPDATE_PERIOD);
4557		if (ret) {
4558			ath10k_warn(ar,
4559				    "failed to set peer stats period : %d\n",
4560				    ret);
4561			goto err_core_stop;
4562		}
4563	}
4564
4565	param = ar->wmi.pdev_param->enable_btcoex;
4566	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
4567	    test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
4568		     ar->running_fw->fw_file.fw_features)) {
4569		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4570		if (ret) {
4571			ath10k_warn(ar,
4572				    "failed to set btcoex param: %d\n", ret);
4573			goto err_core_stop;
4574		}
4575		clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
4576	}
4577
4578	ar->num_started_vdevs = 0;
4579	ath10k_regd_update(ar);
4580
4581	ath10k_spectral_start(ar);
4582	ath10k_thermal_set_throttling(ar);
4583
4584	mutex_unlock(&ar->conf_mutex);
4585	return 0;
4586
4587err_core_stop:
4588	ath10k_core_stop(ar);
4589
4590err_power_down:
4591	ath10k_hif_power_down(ar);
4592
4593err_off:
4594	ar->state = ATH10K_STATE_OFF;
4595
4596err:
4597	mutex_unlock(&ar->conf_mutex);
4598	return ret;
4599}
4600
4601static void ath10k_stop(struct ieee80211_hw *hw)
4602{
4603	struct ath10k *ar = hw->priv;
4604
4605	ath10k_drain_tx(ar);
4606
4607	mutex_lock(&ar->conf_mutex);
4608	if (ar->state != ATH10K_STATE_OFF) {
4609		ath10k_halt(ar);
4610		ar->state = ATH10K_STATE_OFF;
4611	}
4612	mutex_unlock(&ar->conf_mutex);
4613
4614	cancel_delayed_work_sync(&ar->scan.timeout);
4615	cancel_work_sync(&ar->restart_work);
4616}
4617
4618static int ath10k_config_ps(struct ath10k *ar)
4619{
4620	struct ath10k_vif *arvif;
4621	int ret = 0;
4622
4623	lockdep_assert_held(&ar->conf_mutex);
4624
4625	list_for_each_entry(arvif, &ar->arvifs, list) {
4626		ret = ath10k_mac_vif_setup_ps(arvif);
4627		if (ret) {
4628			ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4629			break;
4630		}
4631	}
4632
4633	return ret;
4634}
4635
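/* Program the 2 GHz and 5 GHz tx power limit pdev parameters. The limit
 * appears to be taken in half-dBm units, hence the multiplication by two
 * below.
 */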
4636static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4637{
4638	int ret;
4639	u32 param;
4640
4641	lockdep_assert_held(&ar->conf_mutex);
4642
4643	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4644
4645	param = ar->wmi.pdev_param->txpower_limit2g;
4646	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4647	if (ret) {
4648		ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4649			    txpower, ret);
4650		return ret;
4651	}
4652
4653	param = ar->wmi.pdev_param->txpower_limit5g;
4654	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4655	if (ret) {
4656		ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4657			    txpower, ret);
4658		return ret;
4659	}
4660
4661	return 0;
4662}
4663
4664static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4665{
4666	struct ath10k_vif *arvif;
4667	int ret, txpower = -1;
4668
4669	lockdep_assert_held(&ar->conf_mutex);
4670
4671	list_for_each_entry(arvif, &ar->arvifs, list) {
4672		WARN_ON(arvif->txpower < 0);
4673
4674		if (txpower == -1)
4675			txpower = arvif->txpower;
4676		else
4677			txpower = min(txpower, arvif->txpower);
4678	}
4679
4680	if (WARN_ON(txpower == -1))
4681		return -EINVAL;
4682
4683	ret = ath10k_mac_txpower_setup(ar, txpower);
4684	if (ret) {
4685		ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4686			    txpower, ret);
4687		return ret;
4688	}
4689
4690	return 0;
4691}
4692
4693static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4694{
4695	struct ath10k *ar = hw->priv;
4696	struct ieee80211_conf *conf = &hw->conf;
4697	int ret = 0;
4698
4699	mutex_lock(&ar->conf_mutex);
4700
4701	if (changed & IEEE80211_CONF_CHANGE_PS)
4702		ath10k_config_ps(ar);
4703
4704	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4705		ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4706		ret = ath10k_monitor_recalc(ar);
4707		if (ret)
4708			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4709	}
4710
4711	mutex_unlock(&ar->conf_mutex);
4712	return ret;
4713}
4714
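/* Derive the number of spatial streams from a chainmask. Only contiguous
 * masks starting at bit 0 map directly, e.g. 0xf -> 4, 0x7 -> 3, 0x3 -> 2;
 * anything else falls back to 1.
 */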
4715static u32 get_nss_from_chainmask(u16 chain_mask)
4716{
4717	if ((chain_mask & 0xf) == 0xf)
4718		return 4;
4719	else if ((chain_mask & 0x7) == 0x7)
4720		return 3;
4721	else if ((chain_mask & 0x3) == 0x3)
4722		return 2;
4723	return 1;
4724}
4725
4726static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4727{
4728	u32 value = 0;
4729	struct ath10k *ar = arvif->ar;
4730	int nsts;
4731	int sound_dim;
4732
4733	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4734		return 0;
4735
4736	nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4737	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4738				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4739		value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4740
4741	sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4742	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4743				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4744		value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4745
4746	if (!value)
4747		return 0;
4748
4749	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4750		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4751
4752	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4753		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4754			  WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4755
4756	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4757		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4758
4759	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4760		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4761			  WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4762
4763	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4764					 ar->wmi.vdev_param->txbf, value);
4765}
4766
4767/*
4768 * TODO:
4769 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4770 * because we will send mgmt frames without CCK. This requirement
4771 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4772 * in the TX packet.
4773 */
4774static int ath10k_add_interface(struct ieee80211_hw *hw,
4775				struct ieee80211_vif *vif)
4776{
4777	struct ath10k *ar = hw->priv;
4778	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4779	struct ath10k_peer *peer;
4780	enum wmi_sta_powersave_param param;
4781	int ret = 0;
4782	u32 value;
4783	int bit;
4784	int i;
4785	u32 vdev_param;
4786
4787	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4788
4789	mutex_lock(&ar->conf_mutex);
4790
4791	memset(arvif, 0, sizeof(*arvif));
4792	ath10k_mac_txq_init(vif->txq);
4793
4794	arvif->ar = ar;
4795	arvif->vif = vif;
4796
4797	INIT_LIST_HEAD(&arvif->list);
4798	INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
4799	INIT_DELAYED_WORK(&arvif->connection_loss_work,
4800			  ath10k_mac_vif_sta_connection_loss_work);
4801
4802	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4803		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4804		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4805		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4806		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4807		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4808	}
4809
4810	if (ar->num_peers >= ar->max_num_peers) {
4811		ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
4812		ret = -ENOBUFS;
4813		goto err;
4814	}
4815
4816	if (ar->free_vdev_map == 0) {
4817		ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
4818		ret = -EBUSY;
4819		goto err;
4820	}
4821	bit = __ffs64(ar->free_vdev_map);
4822
4823	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4824		   bit, ar->free_vdev_map);
4825
4826	arvif->vdev_id = bit;
4827	arvif->vdev_subtype =
4828		ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
4829
4830	switch (vif->type) {
4831	case NL80211_IFTYPE_P2P_DEVICE:
4832		arvif->vdev_type = WMI_VDEV_TYPE_STA;
4833		arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4834					(ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
4835		break;
4836	case NL80211_IFTYPE_UNSPECIFIED:
4837	case NL80211_IFTYPE_STATION:
4838		arvif->vdev_type = WMI_VDEV_TYPE_STA;
4839		if (vif->p2p)
4840			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4841					(ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
4842		break;
4843	case NL80211_IFTYPE_ADHOC:
4844		arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
4845		break;
4846	case NL80211_IFTYPE_MESH_POINT:
4847		if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
4848			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4849						(ar, WMI_VDEV_SUBTYPE_MESH_11S);
4850		} else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4851			ret = -EINVAL;
4852			ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
4853			goto err;
4854		}
4855		arvif->vdev_type = WMI_VDEV_TYPE_AP;
4856		break;
4857	case NL80211_IFTYPE_AP:
4858		arvif->vdev_type = WMI_VDEV_TYPE_AP;
4859
4860		if (vif->p2p)
4861			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4862						(ar, WMI_VDEV_SUBTYPE_P2P_GO);
4863		break;
4864	case NL80211_IFTYPE_MONITOR:
4865		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
4866		break;
4867	default:
4868		WARN_ON(1);
4869		break;
4870	}
4871
4872	/* Using vdev_id as the queue number makes it very easy to do per-vif
4873	 * tx queue locking. This shouldn't wrap due to interface combinations,
4874	 * but do a modulo for correctness' sake and to prevent using offchannel
4875	 * tx queues for regular vif tx.
4876	 */
4877	vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4878	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
4879		vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4880
4881	/* Some firmware revisions don't wait for beacon tx completion before
4882	 * sending another SWBA event. This could lead to hardware using old
4883	 * (freed) beacon data in some cases, e.g. tx credit starvation
4884	 * (freed) beacon data in some cases, e.g. tx credit starvation
4885	 * combined with a missed TBTT. This is very, very rare.
4886	 *
4887	 * On non-IOMMU-enabled hosts this could be a security issue
4888	 * because the hw could beacon some random data on the air.  On
4889	 * device would crash.
4890	 *
4891	 * Since there are no beacon tx completions (implicit nor explicit)
4892	 * propagated to host the only workaround for this is to allocate a
4893	 * DMA-coherent buffer for a lifetime of a vif and use it for all
4894	 * beacon tx commands. Worst case for this approach is some beacons may
4895	 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
4896	 */
4897	if (vif->type == NL80211_IFTYPE_ADHOC ||
4898	    vif->type == NL80211_IFTYPE_MESH_POINT ||
4899	    vif->type == NL80211_IFTYPE_AP) {
4900		arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
4901							IEEE80211_MAX_FRAME_LEN,
4902							&arvif->beacon_paddr,
4903							GFP_ATOMIC);
4904		if (!arvif->beacon_buf) {
4905			ret = -ENOMEM;
4906			ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
4907				    ret);
4908			goto err;
4909		}
4910	}
4911	if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
4912		arvif->nohwcrypt = true;
4913
4914	if (arvif->nohwcrypt &&
4915	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4916		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
		ret = -EINVAL; /* do not report success when sw crypto cannot be used */
4917		goto err;
4918	}
4919
4920	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
4921		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
4922		   arvif->beacon_buf ? "single-buf" : "per-skb");
4923
4924	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
4925				     arvif->vdev_subtype, vif->addr);
4926	if (ret) {
4927		ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
4928			    arvif->vdev_id, ret);
4929		goto err;
4930	}
4931
4932	ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
4933	spin_lock_bh(&ar->data_lock);
4934	list_add(&arvif->list, &ar->arvifs);
4935	spin_unlock_bh(&ar->data_lock);
4936
4937	/* It makes no sense to have firmware do keepalives. mac80211 already
4938	 * takes care of this with idle connection polling.
4939	 */
4940	ret = ath10k_mac_vif_disable_keepalive(arvif);
4941	if (ret) {
4942		ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
4943			    arvif->vdev_id, ret);
4944		goto err_vdev_delete;
4945	}
4946
4947	arvif->def_wep_key_idx = -1;
4948
4949	vdev_param = ar->wmi.vdev_param->tx_encap_type;
4950	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4951					ATH10K_HW_TXRX_NATIVE_WIFI);
4952	/* 10.X firmware does not support this VDEV parameter. Do not warn */
4953	if (ret && ret != -EOPNOTSUPP) {
4954		ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
4955			    arvif->vdev_id, ret);
4956		goto err_vdev_delete;
4957	}
4958
4959	/* Configuring the number of spatial streams for a monitor interface
4960	 * causes a target assert in qca9888 and qca6174.
4961	 */
4962	if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
4963		u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
4964
4965		vdev_param = ar->wmi.vdev_param->nss;
4966		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4967						nss);
4968		if (ret) {
4969			ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
4970				    arvif->vdev_id, ar->cfg_tx_chainmask, nss,
4971				    ret);
4972			goto err_vdev_delete;
4973		}
4974	}
4975
4976	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4977	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
4978		ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
4979					 vif->addr, WMI_PEER_TYPE_DEFAULT);
4980		if (ret) {
4981			ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
4982				    arvif->vdev_id, ret);
4983			goto err_vdev_delete;
4984		}
4985
4986		spin_lock_bh(&ar->data_lock);
4987
4988		peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
4989		if (!peer) {
4990			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
4991				    vif->addr, arvif->vdev_id);
4992			spin_unlock_bh(&ar->data_lock);
4993			ret = -ENOENT;
4994			goto err_peer_delete;
4995		}
4996
4997		arvif->peer_id = find_first_bit(peer->peer_ids,
4998						ATH10K_MAX_NUM_PEER_IDS);
4999
5000		spin_unlock_bh(&ar->data_lock);
5001	} else {
5002		arvif->peer_id = HTT_INVALID_PEERID;
5003	}
5004
5005	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
5006		ret = ath10k_mac_set_kickout(arvif);
5007		if (ret) {
5008			ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
5009				    arvif->vdev_id, ret);
5010			goto err_peer_delete;
5011		}
5012	}
5013
5014	if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
5015		param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
5016		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
5017		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
5018						  param, value);
5019		if (ret) {
5020			ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
5021				    arvif->vdev_id, ret);
5022			goto err_peer_delete;
5023		}
5024
5025		ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
5026		if (ret) {
5027			ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
5028				    arvif->vdev_id, ret);
5029			goto err_peer_delete;
5030		}
5031
5032		ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
5033		if (ret) {
5034			ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
5035				    arvif->vdev_id, ret);
5036			goto err_peer_delete;
5037		}
5038	}
5039
5040	ret = ath10k_mac_set_txbf_conf(arvif);
5041	if (ret) {
5042		ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
5043			    arvif->vdev_id, ret);
5044		goto err_peer_delete;
5045	}
5046
5047	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
5048	if (ret) {
5049		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
5050			    arvif->vdev_id, ret);
5051		goto err_peer_delete;
5052	}
5053
5054	arvif->txpower = vif->bss_conf.txpower;
5055	ret = ath10k_mac_txpower_recalc(ar);
5056	if (ret) {
5057		ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5058		goto err_peer_delete;
5059	}
5060
5061	if (vif->type == NL80211_IFTYPE_MONITOR) {
5062		ar->monitor_arvif = arvif;
5063		ret = ath10k_monitor_recalc(ar);
5064		if (ret) {
5065			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5066			goto err_peer_delete;
5067		}
5068	}
5069
5070	spin_lock_bh(&ar->htt.tx_lock);
5071	if (!ar->tx_paused)
5072		ieee80211_wake_queue(ar->hw, arvif->vdev_id);
5073	spin_unlock_bh(&ar->htt.tx_lock);
5074
5075	mutex_unlock(&ar->conf_mutex);
5076	return 0;
5077
5078err_peer_delete:
5079	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5080	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
5081		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
5082
5083err_vdev_delete:
5084	ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5085	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5086	spin_lock_bh(&ar->data_lock);
5087	list_del(&arvif->list);
5088	spin_unlock_bh(&ar->data_lock);
5089
5090err:
5091	if (arvif->beacon_buf) {
5092		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
5093				  arvif->beacon_buf, arvif->beacon_paddr);
5094		arvif->beacon_buf = NULL;
5095	}
5096
5097	mutex_unlock(&ar->conf_mutex);
5098
5099	return ret;
5100}
5101
5102static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5103{
5104	int i;
5105
5106	for (i = 0; i < BITS_PER_LONG; i++)
5107		ath10k_mac_vif_tx_unlock(arvif, i);
5108}
5109
5110static void ath10k_remove_interface(struct ieee80211_hw *hw,
5111				    struct ieee80211_vif *vif)
5112{
5113	struct ath10k *ar = hw->priv;
5114	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5115	struct ath10k_peer *peer;
5116	int ret;
5117	int i;
5118
5119	cancel_work_sync(&arvif->ap_csa_work);
5120	cancel_delayed_work_sync(&arvif->connection_loss_work);
5121
5122	mutex_lock(&ar->conf_mutex);
5123
5124	spin_lock_bh(&ar->data_lock);
5125	ath10k_mac_vif_beacon_cleanup(arvif);
5126	spin_unlock_bh(&ar->data_lock);
5127
5128	ret = ath10k_spectral_vif_stop(arvif);
5129	if (ret)
5130		ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5131			    arvif->vdev_id, ret);
5132
5133	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5134	spin_lock_bh(&ar->data_lock);
5135	list_del(&arvif->list);
5136	spin_unlock_bh(&ar->data_lock);
5137
5138	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5139	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5140		ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5141					     vif->addr);
5142		if (ret)
5143			ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5144				    arvif->vdev_id, ret);
5145
5146		kfree(arvif->u.ap.noa_data);
5147	}
5148
5149	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5150		   arvif->vdev_id);
5151
5152	ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5153	if (ret)
5154		ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5155			    arvif->vdev_id, ret);
5156
5157	/* Some firmware revisions don't notify host about self-peer removal
5158	 * until after associated vdev is deleted.
5159	 */
5160	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5161	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5162		ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5163						   vif->addr);
5164		if (ret)
5165			ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5166				    arvif->vdev_id, ret);
5167
5168		spin_lock_bh(&ar->data_lock);
5169		ar->num_peers--;
5170		spin_unlock_bh(&ar->data_lock);
5171	}
5172
5173	spin_lock_bh(&ar->data_lock);
5174	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5175		peer = ar->peer_map[i];
5176		if (!peer)
5177			continue;
5178
5179		if (peer->vif == vif) {
5180			ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5181				    vif->addr, arvif->vdev_id);
5182			peer->vif = NULL;
5183		}
5184	}
5185	spin_unlock_bh(&ar->data_lock);
5186
5187	ath10k_peer_cleanup(ar, arvif->vdev_id);
5188	ath10k_mac_txq_unref(ar, vif->txq);
5189
5190	if (vif->type == NL80211_IFTYPE_MONITOR) {
5191		ar->monitor_arvif = NULL;
5192		ret = ath10k_monitor_recalc(ar);
5193		if (ret)
5194			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5195	}
5196
5197	spin_lock_bh(&ar->htt.tx_lock);
5198	ath10k_mac_vif_tx_unlock_all(arvif);
5199	spin_unlock_bh(&ar->htt.tx_lock);
5200
5203	mutex_unlock(&ar->conf_mutex);
5204}
5205
5206/*
5207 * FIXME: Has to be verified.
5208 */
5209#define SUPPORTED_FILTERS			\
5210	(FIF_ALLMULTI |				\
5211	FIF_CONTROL |				\
5212	FIF_PSPOLL |				\
5213	FIF_OTHER_BSS |				\
5214	FIF_BCN_PRBRESP_PROMISC |		\
5215	FIF_PROBE_REQ |				\
5216	FIF_FCSFAIL)
5217
5218static void ath10k_configure_filter(struct ieee80211_hw *hw,
5219				    unsigned int changed_flags,
5220				    unsigned int *total_flags,
5221				    u64 multicast)
5222{
5223	struct ath10k *ar = hw->priv;
5224	int ret;
5225
5226	mutex_lock(&ar->conf_mutex);
5227
5228	changed_flags &= SUPPORTED_FILTERS;
5229	*total_flags &= SUPPORTED_FILTERS;
5230	ar->filter_flags = *total_flags;
5231
5232	ret = ath10k_monitor_recalc(ar);
5233	if (ret)
5234		ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5235
5236	mutex_unlock(&ar->conf_mutex);
5237}
5238
5239static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5240				    struct ieee80211_vif *vif,
5241				    struct ieee80211_bss_conf *info,
5242				    u32 changed)
5243{
5244	struct ath10k *ar = hw->priv;
5245	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5246	int ret = 0;
5247	u32 vdev_param, pdev_param, slottime, preamble;
5248
5249	mutex_lock(&ar->conf_mutex);
5250
5251	if (changed & BSS_CHANGED_IBSS)
5252		ath10k_control_ibss(arvif, info, vif->addr);
5253
5254	if (changed & BSS_CHANGED_BEACON_INT) {
5255		arvif->beacon_interval = info->beacon_int;
5256		vdev_param = ar->wmi.vdev_param->beacon_interval;
5257		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5258						arvif->beacon_interval);
5259		ath10k_dbg(ar, ATH10K_DBG_MAC,
5260			   "mac vdev %d beacon_interval %d\n",
5261			   arvif->vdev_id, arvif->beacon_interval);
5262
5263		if (ret)
5264			ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5265				    arvif->vdev_id, ret);
5266	}
5267
5268	if (changed & BSS_CHANGED_BEACON) {
5269		ath10k_dbg(ar, ATH10K_DBG_MAC,
5270			   "vdev %d set beacon tx mode to staggered\n",
5271			   arvif->vdev_id);
5272
5273		pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5274		ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5275						WMI_BEACON_STAGGERED_MODE);
5276		if (ret)
5277			ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5278				    arvif->vdev_id, ret);
5279
5280		ret = ath10k_mac_setup_bcn_tmpl(arvif);
5281		if (ret)
5282			ath10k_warn(ar, "failed to update beacon template: %d\n",
5283				    ret);
5284
5285		if (ieee80211_vif_is_mesh(vif)) {
5286			/* mesh doesn't use SSID but firmware needs it */
5287			strncpy(arvif->u.ap.ssid, "mesh",
5288				sizeof(arvif->u.ap.ssid));
5289			arvif->u.ap.ssid_len = 4;
5290		}
5291	}
5292
5293	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5294		ret = ath10k_mac_setup_prb_tmpl(arvif);
5295		if (ret)
5296			ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5297				    arvif->vdev_id, ret);
5298	}
5299
5300	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5301		arvif->dtim_period = info->dtim_period;
5302
5303		ath10k_dbg(ar, ATH10K_DBG_MAC,
5304			   "mac vdev %d dtim_period %d\n",
5305			   arvif->vdev_id, arvif->dtim_period);
5306
5307		vdev_param = ar->wmi.vdev_param->dtim_period;
5308		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5309						arvif->dtim_period);
5310		if (ret)
5311			ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5312				    arvif->vdev_id, ret);
5313	}
5314
5315	if (changed & BSS_CHANGED_SSID &&
5316	    vif->type == NL80211_IFTYPE_AP) {
5317		arvif->u.ap.ssid_len = info->ssid_len;
5318		if (info->ssid_len)
5319			memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5320		arvif->u.ap.hidden_ssid = info->hidden_ssid;
5321	}
5322
5323	if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5324		ether_addr_copy(arvif->bssid, info->bssid);
5325
5326	if (changed & BSS_CHANGED_BEACON_ENABLED)
5327		ath10k_control_beaconing(arvif, info);
5328
5329	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5330		arvif->use_cts_prot = info->use_cts_prot;
5331		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
5332			   arvif->vdev_id, info->use_cts_prot);
5333
5334		ret = ath10k_recalc_rtscts_prot(arvif);
5335		if (ret)
5336			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5337				    arvif->vdev_id, ret);
5338
5339		vdev_param = ar->wmi.vdev_param->protection_mode;
5340		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5341						info->use_cts_prot ? 1 : 0);
5342		if (ret)
5343			ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
5344				    info->use_cts_prot, arvif->vdev_id, ret);
5345	}
5346
5347	if (changed & BSS_CHANGED_ERP_SLOT) {
5348		if (info->use_short_slot)
5349			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5351		else
5352			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5353
5354		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5355			   arvif->vdev_id, slottime);
5356
5357		vdev_param = ar->wmi.vdev_param->slot_time;
5358		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5359						slottime);
5360		if (ret)
5361			ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5362				    arvif->vdev_id, ret);
5363	}
5364
5365	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5366		if (info->use_short_preamble)
5367			preamble = WMI_VDEV_PREAMBLE_SHORT;
5368		else
5369			preamble = WMI_VDEV_PREAMBLE_LONG;
5370
5371		ath10k_dbg(ar, ATH10K_DBG_MAC,
5372			   "mac vdev %d preamble %d\n",
5373			   arvif->vdev_id, preamble);
5374
5375		vdev_param = ar->wmi.vdev_param->preamble;
5376		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5377						preamble);
5378		if (ret)
5379			ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5380				    arvif->vdev_id, ret);
5381	}
5382
5383	if (changed & BSS_CHANGED_ASSOC) {
5384		if (info->assoc) {
5385			/* Workaround: Make sure monitor vdev is not running
5386			 * when associating to prevent some firmware revisions
5387			 * (e.g. 10.1 and 10.2) from crashing.
5388			 */
5389			if (ar->monitor_started)
5390				ath10k_monitor_stop(ar);
5391			ath10k_bss_assoc(hw, vif, info);
5392			ath10k_monitor_recalc(ar);
5393		} else {
5394			ath10k_bss_disassoc(hw, vif);
5395		}
5396	}
5397
5398	if (changed & BSS_CHANGED_TXPOWER) {
5399		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5400			   arvif->vdev_id, info->txpower);
5401
5402		arvif->txpower = info->txpower;
5403		ret = ath10k_mac_txpower_recalc(ar);
5404		if (ret)
5405			ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5406	}
5407
5408	if (changed & BSS_CHANGED_PS) {
5409		arvif->ps = vif->bss_conf.ps;
5410
5411		ret = ath10k_config_ps(ar);
5412		if (ret)
5413			ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5414				    arvif->vdev_id, ret);
5415	}
5416
5417	mutex_unlock(&ar->conf_mutex);
5418}
5419
5420static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
5421{
5422	struct ath10k *ar = hw->priv;
5423
5424	/* This function should never be called if setting the coverage class
5425	 * is not supported on this hardware.
5426	 */
5427	if (!ar->hw_params.hw_ops->set_coverage_class) {
5428		WARN_ON_ONCE(1);
5429		return;
5430	}
5431	ar->hw_params.hw_ops->set_coverage_class(ar, value);
5432}
5433
5434static int ath10k_hw_scan(struct ieee80211_hw *hw,
5435			  struct ieee80211_vif *vif,
5436			  struct ieee80211_scan_request *hw_req)
5437{
5438	struct ath10k *ar = hw->priv;
5439	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5440	struct cfg80211_scan_request *req = &hw_req->req;
5441	struct wmi_start_scan_arg arg;
5442	int ret = 0;
5443	int i;
5444
5445	mutex_lock(&ar->conf_mutex);
5446
5447	spin_lock_bh(&ar->data_lock);
5448	switch (ar->scan.state) {
5449	case ATH10K_SCAN_IDLE:
5450		reinit_completion(&ar->scan.started);
5451		reinit_completion(&ar->scan.completed);
5452		ar->scan.state = ATH10K_SCAN_STARTING;
5453		ar->scan.is_roc = false;
5454		ar->scan.vdev_id = arvif->vdev_id;
5455		ret = 0;
5456		break;
5457	case ATH10K_SCAN_STARTING:
5458	case ATH10K_SCAN_RUNNING:
5459	case ATH10K_SCAN_ABORTING:
5460		ret = -EBUSY;
5461		break;
5462	}
5463	spin_unlock_bh(&ar->data_lock);
5464
5465	if (ret)
5466		goto exit;
5467
5468	memset(&arg, 0, sizeof(arg));
5469	ath10k_wmi_start_scan_init(ar, &arg);
5470	arg.vdev_id = arvif->vdev_id;
5471	arg.scan_id = ATH10K_SCAN_ID;
5472
5473	if (req->ie_len) {
5474		arg.ie_len = req->ie_len;
5475		memcpy(arg.ie, req->ie, arg.ie_len);
5476	}
5477
5478	if (req->n_ssids) {
5479		arg.n_ssids = req->n_ssids;
5480		for (i = 0; i < arg.n_ssids; i++) {
5481			arg.ssids[i].len  = req->ssids[i].ssid_len;
5482			arg.ssids[i].ssid = req->ssids[i].ssid;
5483		}
5484	} else {
5485		arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5486	}
5487
5488	if (req->n_channels) {
5489		arg.n_channels = req->n_channels;
5490		for (i = 0; i < arg.n_channels; i++)
5491			arg.channels[i] = req->channels[i]->center_freq;
5492	}
5493
5494	ret = ath10k_start_scan(ar, &arg);
5495	if (ret) {
5496		ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5497		spin_lock_bh(&ar->data_lock);
5498		ar->scan.state = ATH10K_SCAN_IDLE;
5499		spin_unlock_bh(&ar->data_lock);
5500	}
5501
5502	/* Add a 200ms margin to account for event/command processing */
5503	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5504				     msecs_to_jiffies(arg.max_scan_time +
5505						      200));
5506
5507exit:
5508	mutex_unlock(&ar->conf_mutex);
5509	return ret;
5510}
5511
5512static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5513				  struct ieee80211_vif *vif)
5514{
5515	struct ath10k *ar = hw->priv;
5516
5517	mutex_lock(&ar->conf_mutex);
5518	ath10k_scan_abort(ar);
5519	mutex_unlock(&ar->conf_mutex);
5520
5521	cancel_delayed_work_sync(&ar->scan.timeout);
5522}
5523
5524static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5525					struct ath10k_vif *arvif,
5526					enum set_key_cmd cmd,
5527					struct ieee80211_key_conf *key)
5528{
5529	u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5530	int ret;
5531
5532	/* The 10.1 firmware branch requires the default key index to be set to
5533	 * the group key index after installing it. Otherwise FW/HW transmits
5534	 * corrupted frames with multi-vif APs. This is not required for the
5535	 * main firmware branch (e.g. 636).
5536	 *
5537	 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5538	 *
5539	 * FIXME: It remains unknown if this is required for multi-vif STA
5540	 * interfaces on 10.1.
5541	 */
5542
5543	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5544	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5545		return;
5546
5547	if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5548		return;
5549
5550	if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5551		return;
5552
5553	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5554		return;
5555
5556	if (cmd != SET_KEY)
5557		return;
5558
5559	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5560					key->keyidx);
5561	if (ret)
5562		ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5563			    arvif->vdev_id, ret);
5564}
5565
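/* mac80211 .set_key callback. Keys are installed in firmware via WMI.
 * AES-CMAC and the nohwcrypt case are left to software crypto (return
 * value 1), while WEP keys get extra handling: per-peer installs for IBSS
 * and a duplicate pairwise install on station interfaces.
 */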
5566static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5567			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5568			  struct ieee80211_key_conf *key)
5569{
5570	struct ath10k *ar = hw->priv;
5571	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5572	struct ath10k_peer *peer;
5573	const u8 *peer_addr;
5574	bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5575		      key->cipher == WLAN_CIPHER_SUITE_WEP104;
5576	int ret = 0;
5577	int ret2;
5578	u32 flags = 0;
5579	u32 flags2;
5580
5581	/* this one needs to be done in software */
5582	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
5583		return 1;
5584
5585	if (arvif->nohwcrypt)
5586		return 1;
5587
5588	if (key->keyidx > WMI_MAX_KEY_INDEX)
5589		return -ENOSPC;
5590
5591	mutex_lock(&ar->conf_mutex);
5592
5593	if (sta)
5594		peer_addr = sta->addr;
5595	else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5596		peer_addr = vif->bss_conf.bssid;
5597	else
5598		peer_addr = vif->addr;
5599
5600	key->hw_key_idx = key->keyidx;
5601
5602	if (is_wep) {
5603		if (cmd == SET_KEY)
5604			arvif->wep_keys[key->keyidx] = key;
5605		else
5606			arvif->wep_keys[key->keyidx] = NULL;
5607	}
5608
5609	/* The peer should not disappear midway (unless FW goes awry) since
5610	 * we already hold conf_mutex. We just make sure it's there now. */
5611	spin_lock_bh(&ar->data_lock);
5612	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5613	spin_unlock_bh(&ar->data_lock);
5614
5615	if (!peer) {
5616		if (cmd == SET_KEY) {
5617			ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5618				    peer_addr);
5619			ret = -EOPNOTSUPP;
5620			goto exit;
5621		} else {
5622			/* if the peer doesn't exist there is no key to disable
5623			 * anymore */
5624			goto exit;
5625		}
5626	}
5627
5628	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5629		flags |= WMI_KEY_PAIRWISE;
5630	else
5631		flags |= WMI_KEY_GROUP;
5632
5633	if (is_wep) {
5634		if (cmd == DISABLE_KEY)
5635			ath10k_clear_vdev_key(arvif, key);
5636
5637		/* When WEP keys are uploaded it's possible that there are
5638		 * stations associated already (e.g. when merging) without any
5639		 * keys. Static WEP needs an explicit per-peer key upload.
5640		 */
5641		if (vif->type == NL80211_IFTYPE_ADHOC &&
5642		    cmd == SET_KEY)
5643			ath10k_mac_vif_update_wep_key(arvif, key);
5644
5645		/* 802.1x never sets the def_wep_key_idx so each set_key()
5646		 * call changes default tx key.
5647		 *
5648		 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5649		 * after first set_key().
5650		 */
5651		if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5652			flags |= WMI_KEY_TX_USAGE;
5653	}
5654
5655	ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5656	if (ret) {
5657		WARN_ON(ret > 0);
5658		ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5659			    arvif->vdev_id, peer_addr, ret);
5660		goto exit;
5661	}
5662
5663	/* mac80211 sets static WEP keys as groupwise while firmware requires
5664	 * them to be installed twice as both pairwise and groupwise.
5665	 */
5666	if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5667		flags2 = flags;
5668		flags2 &= ~WMI_KEY_GROUP;
5669		flags2 |= WMI_KEY_PAIRWISE;
5670
5671		ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5672		if (ret) {
5673			WARN_ON(ret > 0);
5674			ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5675				    arvif->vdev_id, peer_addr, ret);
5676			ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5677						  peer_addr, flags);
5678			if (ret2) {
5679				WARN_ON(ret2 > 0);
5680				ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5681					    arvif->vdev_id, peer_addr, ret2);
5682			}
5683			goto exit;
5684		}
5685	}
5686
5687	ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5688
5689	spin_lock_bh(&ar->data_lock);
5690	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5691	if (peer && cmd == SET_KEY)
5692		peer->keys[key->keyidx] = key;
5693	else if (peer && cmd == DISABLE_KEY)
5694		peer->keys[key->keyidx] = NULL;
5695	else if (peer == NULL)
5696		/* impossible unless FW goes crazy */
5697		ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
5698	spin_unlock_bh(&ar->data_lock);
5699
5700exit:
5701	mutex_unlock(&ar->conf_mutex);
5702	return ret;
5703}
5704
5705static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5706					   struct ieee80211_vif *vif,
5707					   int keyidx)
5708{
5709	struct ath10k *ar = hw->priv;
5710	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5711	int ret;
5712
5713	mutex_lock(&arvif->ar->conf_mutex);
5714
5715	if (arvif->ar->state != ATH10K_STATE_ON)
5716		goto unlock;
5717
5718	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5719		   arvif->vdev_id, keyidx);
5720
5721	ret = ath10k_wmi_vdev_set_param(arvif->ar,
5722					arvif->vdev_id,
5723					arvif->ar->wmi.vdev_param->def_keyid,
5724					keyidx);
5725
5726	if (ret) {
5727		ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5728			    arvif->vdev_id,
5729			    ret);
5730		goto unlock;
5731	}
5732
5733	arvif->def_wep_key_idx = keyidx;
5734
5735unlock:
5736	mutex_unlock(&arvif->ar->conf_mutex);
5737}
5738
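/* Worker applying deferred station rate-control updates (bandwidth, NSS
 * and SMPS changes) through WMI peer parameters, and reassociating the
 * station when its supported rates or NSS changed.
 */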
5739static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5740{
5741	struct ath10k *ar;
5742	struct ath10k_vif *arvif;
5743	struct ath10k_sta *arsta;
5744	struct ieee80211_sta *sta;
5745	struct cfg80211_chan_def def;
5746	enum nl80211_band band;
5747	const u8 *ht_mcs_mask;
5748	const u16 *vht_mcs_mask;
5749	u32 changed, bw, nss, smps;
5750	int err;
5751
5752	arsta = container_of(wk, struct ath10k_sta, update_wk);
5753	sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5754	arvif = arsta->arvif;
5755	ar = arvif->ar;
5756
5757	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5758		return;
5759
5760	band = def.chan->band;
5761	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
5762	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
5763
5764	spin_lock_bh(&ar->data_lock);
5765
5766	changed = arsta->changed;
5767	arsta->changed = 0;
5768
5769	bw = arsta->bw;
5770	nss = arsta->nss;
5771	smps = arsta->smps;
5772
5773	spin_unlock_bh(&ar->data_lock);
5774
5775	mutex_lock(&ar->conf_mutex);
5776
5777	nss = max_t(u32, 1, nss);
5778	nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
5779			   ath10k_mac_max_vht_nss(vht_mcs_mask)));
5780
5781	if (changed & IEEE80211_RC_BW_CHANGED) {
5782		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
5783			   sta->addr, bw);
5784
5785		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5786						WMI_PEER_CHAN_WIDTH, bw);
5787		if (err)
5788			ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
5789				    sta->addr, bw, err);
5790	}
5791
5792	if (changed & IEEE80211_RC_NSS_CHANGED) {
5793		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
5794			   sta->addr, nss);
5795
5796		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5797						WMI_PEER_NSS, nss);
5798		if (err)
5799			ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
5800				    sta->addr, nss, err);
5801	}
5802
5803	if (changed & IEEE80211_RC_SMPS_CHANGED) {
5804		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
5805			   sta->addr, smps);
5806
5807		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5808						WMI_PEER_SMPS_STATE, smps);
5809		if (err)
5810			ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
5811				    sta->addr, smps, err);
5812	}
5813
5814	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
5815	    changed & IEEE80211_RC_NSS_CHANGED) {
5816		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
5817			   sta->addr);
5818
5819		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
5820		if (err)
5821			ath10k_warn(ar, "failed to reassociate station: %pM\n",
5822				    sta->addr);
5823	}
5824
5825	mutex_unlock(&ar->conf_mutex);
5826}
5827
5828static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
5829				       struct ieee80211_sta *sta)
5830{
5831	struct ath10k *ar = arvif->ar;
5832
5833	lockdep_assert_held(&ar->conf_mutex);
5834
5835	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5836		return 0;
5837
5838	if (ar->num_stations >= ar->max_num_stations)
5839		return -ENOBUFS;
5840
5841	ar->num_stations++;
5842
5843	return 0;
5844}
5845
5846static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
5847					struct ieee80211_sta *sta)
5848{
5849	struct ath10k *ar = arvif->ar;
5850
5851	lockdep_assert_held(&ar->conf_mutex);
5852
5853	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5854		return;
5855
5856	ar->num_stations--;
5857}
5858
5859struct ath10k_mac_tdls_iter_data {
5860	u32 num_tdls_stations;
5861	struct ieee80211_vif *curr_vif;
5862};
5863
5864static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5865						    struct ieee80211_sta *sta)
5866{
5867	struct ath10k_mac_tdls_iter_data *iter_data = data;
5868	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5869	struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5870
5871	if (sta->tdls && sta_vif == iter_data->curr_vif)
5872		iter_data->num_tdls_stations++;
5873}
5874
5875static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5876					      struct ieee80211_vif *vif)
5877{
5878	struct ath10k_mac_tdls_iter_data data = {};
5879
5880	data.curr_vif = vif;
5881
5882	ieee80211_iterate_stations_atomic(hw,
5883					  ath10k_mac_tdls_vif_stations_count_iter,
5884					  &data);
5885	return data.num_tdls_stations;
5886}
5887
5888static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5889					    struct ieee80211_vif *vif)
5890{
5891	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5892	int *num_tdls_vifs = data;
5893
5894	if (vif->type != NL80211_IFTYPE_STATION)
5895		return;
5896
5897	if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5898		(*num_tdls_vifs)++;
5899}
5900
5901static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5902{
5903	int num_tdls_vifs = 0;
5904
5905	ieee80211_iterate_active_interfaces_atomic(hw,
5906						   IEEE80211_IFACE_ITER_NORMAL,
5907						   ath10k_mac_tdls_vifs_count_iter,
5908						   &num_tdls_vifs);
5909	return num_tdls_vifs;
5910}
5911
5912static int ath10k_sta_state(struct ieee80211_hw *hw,
5913			    struct ieee80211_vif *vif,
5914			    struct ieee80211_sta *sta,
5915			    enum ieee80211_sta_state old_state,
5916			    enum ieee80211_sta_state new_state)
5917{
5918	struct ath10k *ar = hw->priv;
5919	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5920	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5921	struct ath10k_peer *peer;
5922	int ret = 0;
5923	int i;
5924
5925	if (old_state == IEEE80211_STA_NOTEXIST &&
5926	    new_state == IEEE80211_STA_NONE) {
5927		memset(arsta, 0, sizeof(*arsta));
5928		arsta->arvif = arvif;
5929		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
5930
5931		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5932			ath10k_mac_txq_init(sta->txq[i]);
5933	}
5934
5935	/* cancel must be done outside the mutex to avoid deadlock */
5936	if ((old_state == IEEE80211_STA_NONE &&
5937	     new_state == IEEE80211_STA_NOTEXIST))
5938		cancel_work_sync(&arsta->update_wk);
5939
5940	mutex_lock(&ar->conf_mutex);
5941
5942	if (old_state == IEEE80211_STA_NOTEXIST &&
5943	    new_state == IEEE80211_STA_NONE) {
5944		/*
5945		 * New station addition.
5946		 */
5947		enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
5948		u32 num_tdls_stations;
5949		u32 num_tdls_vifs;
5950
5951		ath10k_dbg(ar, ATH10K_DBG_MAC,
5952			   "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
5953			   arvif->vdev_id, sta->addr,
5954			   ar->num_stations + 1, ar->max_num_stations,
5955			   ar->num_peers + 1, ar->max_num_peers);
5956
5957		ret = ath10k_mac_inc_num_stations(arvif, sta);
5958		if (ret) {
5959			ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
5960				    ar->max_num_stations);
5961			goto exit;
5962		}
5963
5964		if (sta->tdls)
5965			peer_type = WMI_PEER_TYPE_TDLS;
5966
5967		ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
5968					 sta->addr, peer_type);
5969		if (ret) {
5970			ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
5971				    sta->addr, arvif->vdev_id, ret);
5972			ath10k_mac_dec_num_stations(arvif, sta);
5973			goto exit;
5974		}
5975
5976		spin_lock_bh(&ar->data_lock);
5977
5978		peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5979		if (!peer) {
5980			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5981				    sta->addr, arvif->vdev_id);
5982			spin_unlock_bh(&ar->data_lock);
5983			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5984			ath10k_mac_dec_num_stations(arvif, sta);
5985			ret = -ENOENT;
5986			goto exit;
5987		}
5988
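		/* A peer may be known to firmware under several peer ids
		 * (peer->peer_ids is a bitmap); remember the first one for
		 * this station for later use.
		 */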
5989		arsta->peer_id = find_first_bit(peer->peer_ids,
5990						ATH10K_MAX_NUM_PEER_IDS);
5991
5992		spin_unlock_bh(&ar->data_lock);
5993
5994		if (!sta->tdls)
5995			goto exit;
5996
5997		num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
5998		num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
5999
6000		if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
6001		    num_tdls_stations == 0) {
6002			ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
6003				    arvif->vdev_id, ar->max_num_tdls_vdevs);
6004			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6005			ath10k_mac_dec_num_stations(arvif, sta);
6006			ret = -ENOBUFS;
6007			goto exit;
6008		}
6009
6010		if (num_tdls_stations == 0) {
6011			/* This is the first tdls peer in the current vif */
6012			enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
6013
6014			ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6015							      state);
6016			if (ret) {
6017				ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6018					    arvif->vdev_id, ret);
6019				ath10k_peer_delete(ar, arvif->vdev_id,
6020						   sta->addr);
6021				ath10k_mac_dec_num_stations(arvif, sta);
6022				goto exit;
6023			}
6024		}
6025
6026		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6027						  WMI_TDLS_PEER_STATE_PEERING);
6028		if (ret) {
6029			ath10k_warn(ar,
6030				    "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
6031				    sta->addr, arvif->vdev_id, ret);
6032			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6033			ath10k_mac_dec_num_stations(arvif, sta);
6034
6035			if (num_tdls_stations != 0)
6036				goto exit;
6037			ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6038							WMI_TDLS_DISABLE);
6039		}
6040	} else if ((old_state == IEEE80211_STA_NONE &&
6041		    new_state == IEEE80211_STA_NOTEXIST)) {
6042		/*
6043		 * Existing station deletion.
6044		 */
6045		ath10k_dbg(ar, ATH10K_DBG_MAC,
6046			   "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
6047			   arvif->vdev_id, sta->addr, sta);
6048
6049		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6050		if (ret)
6051			ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
6052				    sta->addr, arvif->vdev_id, ret);
6053
6054		ath10k_mac_dec_num_stations(arvif, sta);
6055
6056		spin_lock_bh(&ar->data_lock);
6057		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
6058			peer = ar->peer_map[i];
6059			if (!peer)
6060				continue;
6061
6062			if (peer->sta == sta) {
6063				ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
6064					    sta->addr, peer, i, arvif->vdev_id);
6065				peer->sta = NULL;
6066
6067				/* Clean up the peer object as well since we
6068				 * must have failed to do this above.
6069				 */
6070				list_del(&peer->list);
6071				ar->peer_map[i] = NULL;
6072				kfree(peer);
6073				ar->num_peers--;
6074			}
6075		}
6076		spin_unlock_bh(&ar->data_lock);
6077
6078		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6079			ath10k_mac_txq_unref(ar, sta->txq[i]);
6080
6081		if (!sta->tdls)
6082			goto exit;
6083
6084		if (ath10k_mac_tdls_vif_stations_count(hw, vif))
6085			goto exit;
6086
6087		/* This was the last tdls peer in current vif */
6088		ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6089						      WMI_TDLS_DISABLE);
6090		if (ret) {
6091			ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6092				    arvif->vdev_id, ret);
6093		}
6094	} else if (old_state == IEEE80211_STA_AUTH &&
6095		   new_state == IEEE80211_STA_ASSOC &&
6096		   (vif->type == NL80211_IFTYPE_AP ||
6097		    vif->type == NL80211_IFTYPE_MESH_POINT ||
6098		    vif->type == NL80211_IFTYPE_ADHOC)) {
6099		/*
6100		 * New association.
6101		 */
6102		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
6103			   sta->addr);
6104
6105		ret = ath10k_station_assoc(ar, vif, sta, false);
6106		if (ret)
6107			ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
6108				    sta->addr, arvif->vdev_id, ret);
6109	} else if (old_state == IEEE80211_STA_ASSOC &&
6110		   new_state == IEEE80211_STA_AUTHORIZED &&
6111		   sta->tdls) {
6112		/*
6113		 * Tdls station authorized.
6114		 */
6115		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
6116			   sta->addr);
6117
6118		ret = ath10k_station_assoc(ar, vif, sta, false);
6119		if (ret) {
6120			ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
6121				    sta->addr, arvif->vdev_id, ret);
6122			goto exit;
6123		}
6124
6125		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6126						  WMI_TDLS_PEER_STATE_CONNECTED);
6127		if (ret)
6128			ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
6129				    sta->addr, arvif->vdev_id, ret);
6130	} else if (old_state == IEEE80211_STA_ASSOC &&
6131		    new_state == IEEE80211_STA_AUTH &&
6132		    (vif->type == NL80211_IFTYPE_AP ||
6133		     vif->type == NL80211_IFTYPE_MESH_POINT ||
6134		     vif->type == NL80211_IFTYPE_ADHOC)) {
6135		/*
6136		 * Disassociation.
6137		 */
6138		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6139			   sta->addr);
6140
6141		ret = ath10k_station_disassoc(ar, vif, sta);
6142		if (ret)
6143			ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6144				    sta->addr, arvif->vdev_id, ret);
6145	}
6146exit:
6147	mutex_unlock(&ar->conf_mutex);
6148	return ret;
6149}
6150
6151static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6152				u16 ac, bool enable)
6153{
6154	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6155	struct wmi_sta_uapsd_auto_trig_arg arg = {};
6156	u32 prio = 0, acc = 0;
6157	u32 value = 0;
6158	int ret = 0;
6159
6160	lockdep_assert_held(&ar->conf_mutex);
6161
6162	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6163		return 0;
6164
6165	switch (ac) {
6166	case IEEE80211_AC_VO:
6167		value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6168			WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6169		prio = 7;
6170		acc = 3;
6171		break;
6172	case IEEE80211_AC_VI:
6173		value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6174			WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6175		prio = 5;
6176		acc = 2;
6177		break;
6178	case IEEE80211_AC_BE:
6179		value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6180			WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6181		prio = 2;
6182		acc = 1;
6183		break;
6184	case IEEE80211_AC_BK:
6185		value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6186			WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6187		prio = 0;
6188		acc = 0;
6189		break;
6190	}
6191
6192	if (enable)
6193		arvif->u.sta.uapsd |= value;
6194	else
6195		arvif->u.sta.uapsd &= ~value;
6196
6197	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6198					  WMI_STA_PS_PARAM_UAPSD,
6199					  arvif->u.sta.uapsd);
6200	if (ret) {
6201		ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6202		goto exit;
6203	}
6204
6205	if (arvif->u.sta.uapsd)
6206		value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6207	else
6208		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6209
6210	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6211					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6212					  value);
6213	if (ret)
6214		ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6215
6216	ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6217	if (ret) {
6218		ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6219			    arvif->vdev_id, ret);
6220		return ret;
6221	}
6222
6223	ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6224	if (ret) {
6225		ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6226			    arvif->vdev_id, ret);
6227		return ret;
6228	}
6229
6230	if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6231	    test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6232		/* Only userspace can make an educated decision on when to send a
6233		 * trigger frame. The following effectively disables the U-APSD
6234		 * autotrigger in firmware (which is enabled by default
6235		 * provided the autotrigger service is available).
6236		 */
6237
6238		arg.wmm_ac = acc;
6239		arg.user_priority = prio;
6240		arg.service_interval = 0;
6241		arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6242		arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6243
6244		ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6245						arvif->bssid, &arg, 1);
6246		if (ret) {
6247			ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6248				    ret);
6249			return ret;
6250		}
6251	}
6252
6253exit:
6254	return ret;
6255}
6256
6257static int ath10k_conf_tx(struct ieee80211_hw *hw,
6258			  struct ieee80211_vif *vif, u16 ac,
6259			  const struct ieee80211_tx_queue_params *params)
6260{
6261	struct ath10k *ar = hw->priv;
6262	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6263	struct wmi_wmm_params_arg *p = NULL;
6264	int ret;
6265
6266	mutex_lock(&ar->conf_mutex);
6267
6268	switch (ac) {
6269	case IEEE80211_AC_VO:
6270		p = &arvif->wmm_params.ac_vo;
6271		break;
6272	case IEEE80211_AC_VI:
6273		p = &arvif->wmm_params.ac_vi;
6274		break;
6275	case IEEE80211_AC_BE:
6276		p = &arvif->wmm_params.ac_be;
6277		break;
6278	case IEEE80211_AC_BK:
6279		p = &arvif->wmm_params.ac_bk;
6280		break;
6281	}
6282
6283	if (WARN_ON(!p)) {
6284		ret = -EINVAL;
6285		goto exit;
6286	}
6287
6288	p->cwmin = params->cw_min;
6289	p->cwmax = params->cw_max;
6290	p->aifs = params->aifs;
6291
6292	/*
6293	 * The channel time duration programmed in the HW is in absolute
6294	 * microseconds, while mac80211 gives the txop in units of
6295	 * 32 microseconds.
6296	 */
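	/* e.g. a mac80211 txop of 94 units programs 94 * 32 = 3008 usec */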
6297	p->txop = params->txop * 32;
6298
6299	if (ar->wmi.ops->gen_vdev_wmm_conf) {
6300		ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6301					       &arvif->wmm_params);
6302		if (ret) {
6303			ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6304				    arvif->vdev_id, ret);
6305			goto exit;
6306		}
6307	} else {
6308		/* This won't work well with multi-interface cases but it's
6309		 * better than nothing.
6310		 */
6311		ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6312		if (ret) {
6313			ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6314			goto exit;
6315		}
6316	}
6317
6318	ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6319	if (ret)
6320		ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6321
6322exit:
6323	mutex_unlock(&ar->conf_mutex);
6324	return ret;
6325}
6326
6327#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6328
6329static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6330				    struct ieee80211_vif *vif,
6331				    struct ieee80211_channel *chan,
6332				    int duration,
6333				    enum ieee80211_roc_type type)
6334{
6335	struct ath10k *ar = hw->priv;
6336	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6337	struct wmi_start_scan_arg arg;
6338	int ret = 0;
6339	u32 scan_time_msec;
6340
6341	mutex_lock(&ar->conf_mutex);
6342
6343	spin_lock_bh(&ar->data_lock);
6344	switch (ar->scan.state) {
6345	case ATH10K_SCAN_IDLE:
6346		reinit_completion(&ar->scan.started);
6347		reinit_completion(&ar->scan.completed);
6348		reinit_completion(&ar->scan.on_channel);
6349		ar->scan.state = ATH10K_SCAN_STARTING;
6350		ar->scan.is_roc = true;
6351		ar->scan.vdev_id = arvif->vdev_id;
6352		ar->scan.roc_freq = chan->center_freq;
6353		ar->scan.roc_notify = true;
6354		ret = 0;
6355		break;
6356	case ATH10K_SCAN_STARTING:
6357	case ATH10K_SCAN_RUNNING:
6358	case ATH10K_SCAN_ABORTING:
6359		ret = -EBUSY;
6360		break;
6361	}
6362	spin_unlock_bh(&ar->data_lock);
6363
6364	if (ret)
6365		goto exit;
6366
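	/* Dwell far longer than any remain-on-channel request can ask for;
	 * the delayed scan timeout work queued below ends the operation after
	 * the requested duration.
	 */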
6367	scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6368
6369	memset(&arg, 0, sizeof(arg));
6370	ath10k_wmi_start_scan_init(ar, &arg);
6371	arg.vdev_id = arvif->vdev_id;
6372	arg.scan_id = ATH10K_SCAN_ID;
6373	arg.n_channels = 1;
6374	arg.channels[0] = chan->center_freq;
6375	arg.dwell_time_active = scan_time_msec;
6376	arg.dwell_time_passive = scan_time_msec;
6377	arg.max_scan_time = scan_time_msec;
6378	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6379	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6380	arg.burst_duration_ms = duration;
6381
6382	ret = ath10k_start_scan(ar, &arg);
6383	if (ret) {
6384		ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6385		spin_lock_bh(&ar->data_lock);
6386		ar->scan.state = ATH10K_SCAN_IDLE;
6387		spin_unlock_bh(&ar->data_lock);
6388		goto exit;
6389	}
6390
6391	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6392	if (ret == 0) {
6393		ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6394
6395		ret = ath10k_scan_stop(ar);
6396		if (ret)
6397			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6398
6399		ret = -ETIMEDOUT;
6400		goto exit;
6401	}
6402
6403	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6404				     msecs_to_jiffies(duration));
6405
6406	ret = 0;
6407exit:
6408	mutex_unlock(&ar->conf_mutex);
6409	return ret;
6410}
6411
6412static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6413{
6414	struct ath10k *ar = hw->priv;
6415
6416	mutex_lock(&ar->conf_mutex);
6417
6418	spin_lock_bh(&ar->data_lock);
6419	ar->scan.roc_notify = false;
6420	spin_unlock_bh(&ar->data_lock);
6421
6422	ath10k_scan_abort(ar);
6423
6424	mutex_unlock(&ar->conf_mutex);
6425
6426	cancel_delayed_work_sync(&ar->scan.timeout);
6427
6428	return 0;
6429}
6430
6431/*
6432 * Both RTS and Fragmentation threshold are interface-specific
6433 * in ath10k, but device-specific in mac80211.
6434 */
6435
6436static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6437{
6438	struct ath10k *ar = hw->priv;
6439	struct ath10k_vif *arvif;
6440	int ret = 0;
6441
6442	mutex_lock(&ar->conf_mutex);
6443	list_for_each_entry(arvif, &ar->arvifs, list) {
6444		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6445			   arvif->vdev_id, value);
6446
6447		ret = ath10k_mac_set_rts(arvif, value);
6448		if (ret) {
6449			ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6450				    arvif->vdev_id, ret);
6451			break;
6452		}
6453	}
6454	mutex_unlock(&ar->conf_mutex);
6455
6456	return ret;
6457}
6458
6459static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6460{
6461	/* Even though there's a WMI enum for the fragmentation threshold, no
6462	 * known firmware actually implements it. Moreover it is not possible
6463	 * to delegate frame fragmentation to mac80211 because firmware clears
6464	 * the "more fragments" bit in frame control, making it impossible for remote
6465	 * devices to reassemble frames.
6466	 *
6467	 * Hence implement a dummy callback just to say fragmentation isn't
6468	 * supported. This effectively prevents mac80211 from doing frame
6469	 * fragmentation in software.
6470	 */
6471	return -EOPNOTSUPP;
6472}
6473
6474static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6475			 u32 queues, bool drop)
6476{
6477	struct ath10k *ar = hw->priv;
6478	bool skip;
6479	long time_left;
6480
6481	/* mac80211 doesn't care whether we really xmit the queued frames or
6482	 * not; we'll collect those frames either way if we stop/delete vdevs. */
6483	if (drop)
6484		return;
6485
6486	mutex_lock(&ar->conf_mutex);
6487
6488	if (ar->state == ATH10K_STATE_WEDGED)
6489		goto skip;
6490
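	/* Wait until HTT reports no pending tx frames, bailing out early if
	 * the device wedges or a crash flush is requested in the meantime.
	 */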
6491	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6492			bool empty;
6493
6494			spin_lock_bh(&ar->htt.tx_lock);
6495			empty = (ar->htt.num_pending_tx == 0);
6496			spin_unlock_bh(&ar->htt.tx_lock);
6497
6498			skip = (ar->state == ATH10K_STATE_WEDGED) ||
6499			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
6500					&ar->dev_flags);
6501
6502			(empty || skip);
6503		}), ATH10K_FLUSH_TIMEOUT_HZ);
6504
6505	if (time_left == 0 || skip)
6506		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6507			    skip, ar->state, time_left);
6508
6509skip:
6510	mutex_unlock(&ar->conf_mutex);
6511}
6512
6513/* TODO: Implement this function properly
6514 * For now it is needed to reply to Probe Requests in IBSS mode.
6515 * Probably we need this information from FW.
6516 */
6517static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6518{
6519	return 1;
6520}
6521
6522static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6523				     enum ieee80211_reconfig_type reconfig_type)
6524{
6525	struct ath10k *ar = hw->priv;
6526
6527	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6528		return;
6529
6530	mutex_lock(&ar->conf_mutex);
6531
6532	/* If the device failed to restart it will be in a different state, e.g.
6533	 * ATH10K_STATE_WEDGED */
6534	if (ar->state == ATH10K_STATE_RESTARTED) {
6535		ath10k_info(ar, "device successfully recovered\n");
6536		ar->state = ATH10K_STATE_ON;
6537		ieee80211_wake_queues(ar->hw);
6538	}
6539
6540	mutex_unlock(&ar->conf_mutex);
6541}
6542
6543static void
6544ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
6545				  struct ieee80211_channel *channel)
6546{
6547	int ret;
6548	enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
6549
6550	lockdep_assert_held(&ar->conf_mutex);
6551
6552	if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
6553	    (ar->rx_channel != channel))
6554		return;
6555
6556	if (ar->scan.state != ATH10K_SCAN_IDLE) {
6557		ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
6558		return;
6559	}
6560
6561	reinit_completion(&ar->bss_survey_done);
6562
6563	ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
6564	if (ret) {
6565		ath10k_warn(ar, "failed to send pdev bss chan info request\n");
6566		return;
6567	}
6568
6569	ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
6570	if (!ret) {
6571		ath10k_warn(ar, "bss channel survey timed out\n");
6572		return;
6573	}
6574}
6575
6576static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6577			     struct survey_info *survey)
6578{
6579	struct ath10k *ar = hw->priv;
6580	struct ieee80211_supported_band *sband;
6581	struct survey_info *ar_survey = &ar->survey[idx];
6582	int ret = 0;
6583
6584	mutex_lock(&ar->conf_mutex);
6585
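	/* The survey index space covers the 2 GHz channels first and then the
	 * 5 GHz channels; translate idx into the right band accordingly.
	 */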
6586	sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6587	if (sband && idx >= sband->n_channels) {
6588		idx -= sband->n_channels;
6589		sband = NULL;
6590	}
6591
6592	if (!sband)
6593		sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6594
6595	if (!sband || idx >= sband->n_channels) {
6596		ret = -ENOENT;
6597		goto exit;
6598	}
6599
6600	ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
6601
6602	spin_lock_bh(&ar->data_lock);
6603	memcpy(survey, ar_survey, sizeof(*survey));
6604	spin_unlock_bh(&ar->data_lock);
6605
6606	survey->channel = &sband->channels[idx];
6607
6608	if (ar->rx_channel == survey->channel)
6609		survey->filled |= SURVEY_INFO_IN_USE;
6610
6611exit:
6612	mutex_unlock(&ar->conf_mutex);
6613	return ret;
6614}
6615
6616static bool
6617ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6618					enum nl80211_band band,
6619					const struct cfg80211_bitrate_mask *mask)
6620{
6621	int num_rates = 0;
6622	int i;
6623
6624	num_rates += hweight32(mask->control[band].legacy);
6625
6626	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6627		num_rates += hweight8(mask->control[band].ht_mcs[i]);
6628
6629	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6630		num_rates += hweight16(mask->control[band].vht_mcs[i]);
6631
6632	return num_rates == 1;
6633}
6634
6635static bool
6636ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6637				       enum nl80211_band band,
6638				       const struct cfg80211_bitrate_mask *mask,
6639				       int *nss)
6640{
6641	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6642	u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6643	u8 ht_nss_mask = 0;
6644	u8 vht_nss_mask = 0;
6645	int i;
6646
6647	if (mask->control[band].legacy)
6648		return false;
6649
6650	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6651		if (mask->control[band].ht_mcs[i] == 0)
6652			continue;
6653		else if (mask->control[band].ht_mcs[i] ==
6654			 sband->ht_cap.mcs.rx_mask[i])
6655			ht_nss_mask |= BIT(i);
6656		else
6657			return false;
6658	}
6659
6660	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6661		if (mask->control[band].vht_mcs[i] == 0)
6662			continue;
6663		else if (mask->control[band].vht_mcs[i] ==
6664			 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6665			vht_nss_mask |= BIT(i);
6666		else
6667			return false;
6668	}
6669
6670	if (ht_nss_mask != vht_nss_mask)
6671		return false;
6672
6673	if (ht_nss_mask == 0)
6674		return false;
6675
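	/* Only a contiguous mask starting at one spatial stream (e.g. 0b0111
	 * for three streams) can be expressed as a single NSS value; fls()
	 * below then yields the stream count.
	 */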
6676	if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6677		return false;
6678
6679	*nss = fls(ht_nss_mask);
6680
6681	return true;
6682}
6683
6684static int
6685ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6686					enum nl80211_band band,
6687					const struct cfg80211_bitrate_mask *mask,
6688					u8 *rate, u8 *nss)
6689{
6690	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6691	int rate_idx;
6692	int i;
6693	u16 bitrate;
6694	u8 preamble;
6695	u8 hw_rate;
6696
6697	if (hweight32(mask->control[band].legacy) == 1) {
6698		rate_idx = ffs(mask->control[band].legacy) - 1;
6699
6700		hw_rate = sband->bitrates[rate_idx].hw_value;
6701		bitrate = sband->bitrates[rate_idx].bitrate;
6702
6703		if (ath10k_mac_bitrate_is_cck(bitrate))
6704			preamble = WMI_RATE_PREAMBLE_CCK;
6705		else
6706			preamble = WMI_RATE_PREAMBLE_OFDM;
6707
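		/* Pack the WMI rate code: preamble in bits [7:6], (nss - 1) in
		 * bits [5:4] and the hardware rate index in bits [3:0].
		 */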
6708		*nss = 1;
6709		*rate = preamble << 6 |
6710			(*nss - 1) << 4 |
6711			hw_rate << 0;
6712
6713		return 0;
6714	}
6715
6716	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6717		if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6718			*nss = i + 1;
6719			*rate = WMI_RATE_PREAMBLE_HT << 6 |
6720				(*nss - 1) << 4 |
6721				(ffs(mask->control[band].ht_mcs[i]) - 1);
6722
6723			return 0;
6724		}
6725	}
6726
6727	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6728		if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6729			*nss = i + 1;
6730			*rate = WMI_RATE_PREAMBLE_VHT << 6 |
6731				(*nss - 1) << 4 |
6732				(ffs(mask->control[band].vht_mcs[i]) - 1);
6733
6734			return 0;
6735		}
6736	}
6737
6738	return -EINVAL;
6739}
6740
6741static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6742					    u8 rate, u8 nss, u8 sgi, u8 ldpc)
6743{
6744	struct ath10k *ar = arvif->ar;
6745	u32 vdev_param;
6746	int ret;
6747
6748	lockdep_assert_held(&ar->conf_mutex);
6749
6750	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6751		   arvif->vdev_id, rate, nss, sgi);
6752
6753	vdev_param = ar->wmi.vdev_param->fixed_rate;
6754	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
6755	if (ret) {
6756		ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
6757			    rate, ret);
6758		return ret;
6759	}
6760
6761	vdev_param = ar->wmi.vdev_param->nss;
6762	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
6763	if (ret) {
6764		ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6765		return ret;
6766	}
6767
6768	vdev_param = ar->wmi.vdev_param->sgi;
6769	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
6770	if (ret) {
6771		ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6772		return ret;
6773	}
6774
6775	vdev_param = ar->wmi.vdev_param->ldpc;
6776	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
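	/* Clamp NSS to at least one stream and to no more than the configured
	 * HT/VHT MCS masks allow.
	 */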
6777	if (ret) {
6778		ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6779		return ret;
6780	}
6781
6782	return 0;
6783}
6784
6785static bool
6786ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6787				enum nl80211_band band,
6788				const struct cfg80211_bitrate_mask *mask)
6789{
6790	int i;
6791	u16 vht_mcs;
6792
6793	/* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6794	 * to express all VHT MCS rate masks. Effectively only the following
6795	 * ranges can be used: none, 0-7, 0-8 and 0-9.
6796	 */
6797	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6798		vht_mcs = mask->control[band].vht_mcs[i];
6799
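		/* Accept only 0x0000 (no rates), 0x00ff (MCS 0-7),
		 * 0x01ff (MCS 0-8) and 0x03ff (MCS 0-9).
		 */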
6800		switch (vht_mcs) {
6801		case 0:
6802		case BIT(8) - 1:
6803		case BIT(9) - 1:
6804		case BIT(10) - 1:
6805			break;
6806		default:
6807			ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
6808			return false;
6809		}
6810	}
6811
6812	return true;
6813}
6814
6815static void ath10k_mac_set_bitrate_mask_iter(void *data,
6816					     struct ieee80211_sta *sta)
6817{
6818	struct ath10k_vif *arvif = data;
6819	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6820	struct ath10k *ar = arvif->ar;
6821
6822	if (arsta->arvif != arvif)
6823		return;
6824
6825	spin_lock_bh(&ar->data_lock);
6826	arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
6827	spin_unlock_bh(&ar->data_lock);
6828
6829	ieee80211_queue_work(ar->hw, &arsta->update_wk);
6830}
6831
6832static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
6833					  struct ieee80211_vif *vif,
6834					  const struct cfg80211_bitrate_mask *mask)
6835{
6836	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6837	struct cfg80211_chan_def def;
6838	struct ath10k *ar = arvif->ar;
6839	enum nl80211_band band;
6840	const u8 *ht_mcs_mask;
6841	const u16 *vht_mcs_mask;
6842	u8 rate;
6843	u8 nss;
6844	u8 sgi;
6845	u8 ldpc;
6846	int single_nss;
6847	int ret;
6848
6849	if (ath10k_mac_vif_chan(vif, &def))
6850		return -EPERM;
6851
6852	band = def.chan->band;
6853	ht_mcs_mask = mask->control[band].ht_mcs;
6854	vht_mcs_mask = mask->control[band].vht_mcs;
6855	ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
6856
6857	sgi = mask->control[band].gi;
6858	if (sgi == NL80211_TXRATE_FORCE_LGI)
6859		return -EINVAL;
6860
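	/* Three cases follow: a mask that selects exactly one rate, a mask
	 * that only constrains the number of spatial streams, and a general
	 * mask that is applied per station via reassociation.
	 */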
6861	if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
6862		ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
6863							      &rate, &nss);
6864		if (ret) {
6865			ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
6866				    arvif->vdev_id, ret);
6867			return ret;
6868		}
6869	} else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
6870							  &single_nss)) {
6871		rate = WMI_FIXED_RATE_NONE;
6872		nss = single_nss;
6873	} else {
6874		rate = WMI_FIXED_RATE_NONE;
6875		nss = min(ar->num_rf_chains,
6876			  max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6877			      ath10k_mac_max_vht_nss(vht_mcs_mask)));
6878
6879		if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
6880			return -EINVAL;
6881
6882		mutex_lock(&ar->conf_mutex);
6883
6884		arvif->bitrate_mask = *mask;
6885		ieee80211_iterate_stations_atomic(ar->hw,
6886						  ath10k_mac_set_bitrate_mask_iter,
6887						  arvif);
6888
6889		mutex_unlock(&ar->conf_mutex);
6890	}
6891
6892	mutex_lock(&ar->conf_mutex);
6893
6894	ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
6895	if (ret) {
6896		ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
6897			    arvif->vdev_id, ret);
6898		goto exit;
6899	}
6900
6901exit:
6902	mutex_unlock(&ar->conf_mutex);
6903
6904	return ret;
6905}
6906
6907static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
6908				 struct ieee80211_vif *vif,
6909				 struct ieee80211_sta *sta,
6910				 u32 changed)
6911{
6912	struct ath10k *ar = hw->priv;
6913	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6914	u32 bw, smps;
6915
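	/* This callback may run in atomic context, so only record the new
	 * values here and let the update worker issue the WMI commands.
	 */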
6916	spin_lock_bh(&ar->data_lock);
6917
6918	ath10k_dbg(ar, ATH10K_DBG_MAC,
6919		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
6920		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
6921		   sta->smps_mode);
6922
6923	if (changed & IEEE80211_RC_BW_CHANGED) {
6924		bw = WMI_PEER_CHWIDTH_20MHZ;
6925
6926		switch (sta->bandwidth) {
6927		case IEEE80211_STA_RX_BW_20:
6928			bw = WMI_PEER_CHWIDTH_20MHZ;
6929			break;
6930		case IEEE80211_STA_RX_BW_40:
6931			bw = WMI_PEER_CHWIDTH_40MHZ;
6932			break;
6933		case IEEE80211_STA_RX_BW_80:
6934			bw = WMI_PEER_CHWIDTH_80MHZ;
6935			break;
6936		case IEEE80211_STA_RX_BW_160:
6937			ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
6938				    sta->bandwidth, sta->addr);
6939			bw = WMI_PEER_CHWIDTH_20MHZ;
6940			break;
6941		}
6942
6943		arsta->bw = bw;
6944	}
6945
6946	if (changed & IEEE80211_RC_NSS_CHANGED)
6947		arsta->nss = sta->rx_nss;
6948
6949	if (changed & IEEE80211_RC_SMPS_CHANGED) {
6950		smps = WMI_PEER_SMPS_PS_NONE;
6951
6952		switch (sta->smps_mode) {
6953		case IEEE80211_SMPS_AUTOMATIC:
6954		case IEEE80211_SMPS_OFF:
6955			smps = WMI_PEER_SMPS_PS_NONE;
6956			break;
6957		case IEEE80211_SMPS_STATIC:
6958			smps = WMI_PEER_SMPS_STATIC;
6959			break;
6960		case IEEE80211_SMPS_DYNAMIC:
6961			smps = WMI_PEER_SMPS_DYNAMIC;
6962			break;
6963		case IEEE80211_SMPS_NUM_MODES:
6964			ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
6965				    sta->smps_mode, sta->addr);
6966			smps = WMI_PEER_SMPS_PS_NONE;
6967			break;
6968		}
6969
6970		arsta->smps = smps;
6971	}
6972
6973	arsta->changed |= changed;
6974
6975	spin_unlock_bh(&ar->data_lock);
6976
6977	ieee80211_queue_work(hw, &arsta->update_wk);
6978}
6979
6980static void ath10k_offset_tsf(struct ieee80211_hw *hw,
6981			      struct ieee80211_vif *vif, s64 tsf_offset)
6982{
6983	struct ath10k *ar = hw->priv;
6984	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6985	u32 offset, vdev_param;
6986	int ret;
6987
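	/* The firmware takes an unsigned TSF adjustment, so choose between
	 * the increment and decrement vdev params based on the sign.
	 */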
6988	if (tsf_offset < 0) {
6989		vdev_param = ar->wmi.vdev_param->dec_tsf;
6990		offset = -tsf_offset;
6991	} else {
6992		vdev_param = ar->wmi.vdev_param->inc_tsf;
6993		offset = tsf_offset;
6994	}
6995
6996	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
6997					vdev_param, offset);
6998
6999	if (ret && ret != -EOPNOTSUPP)
7000		ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
7001			    offset, vdev_param, ret);
7002}
7003
7004static int ath10k_ampdu_action(struct ieee80211_hw *hw,
7005			       struct ieee80211_vif *vif,
7006			       struct ieee80211_ampdu_params *params)
7007{
7008	struct ath10k *ar = hw->priv;
7009	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7010	struct ieee80211_sta *sta = params->sta;
7011	enum ieee80211_ampdu_mlme_action action = params->action;
7012	u16 tid = params->tid;
7013
7014	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
7015		   arvif->vdev_id, sta->addr, tid, action);
7016
7017	switch (action) {
7018	case IEEE80211_AMPDU_RX_START:
7019	case IEEE80211_AMPDU_RX_STOP:
7020		/* HTT AddBa/DelBa events trigger mac80211 Rx BA session
7021		 * creation/removal. Do we need to verify this?
7022		 */
7023		return 0;
7024	case IEEE80211_AMPDU_TX_START:
7025	case IEEE80211_AMPDU_TX_STOP_CONT:
7026	case IEEE80211_AMPDU_TX_STOP_FLUSH:
7027	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
7028	case IEEE80211_AMPDU_TX_OPERATIONAL:
7029		/* Firmware offloads Tx aggregation entirely so deny mac80211
7030		 * Tx aggregation requests.
7031		 */
7032		return -EOPNOTSUPP;
7033	}
7034
7035	return -EINVAL;
7036}
7037
7038static void
7039ath10k_mac_update_rx_channel(struct ath10k *ar,
7040			     struct ieee80211_chanctx_conf *ctx,
7041			     struct ieee80211_vif_chanctx_switch *vifs,
7042			     int n_vifs)
7043{
7044	struct cfg80211_chan_def *def = NULL;
7045
7046	/* Both locks are required because ar->rx_channel is modified. This
7047	 * allows readers to hold either lock.
7048	 */
7049	lockdep_assert_held(&ar->conf_mutex);
7050	lockdep_assert_held(&ar->data_lock);
7051
7052	WARN_ON(ctx && vifs);
7053	WARN_ON(vifs && n_vifs != 1);
7054
7055	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
7056	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
7057	 * ppdu on Rx may reduce performance on low-end systems. It should be
7058	 * possible to build tables/hashmaps to speed up the lookup (be wary of
7059	 * cpu data cache line sizes though), but to keep the initial
7060	 * implementation simple and less intrusive fall back to the slow
7061	 * lookup only for multi-channel cases. Single-channel cases will keep
7062	 * using the old channel derivation and thus performance should not be
7063	 * affected much.
7064	 */
7065	rcu_read_lock();
7066	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
7067		ieee80211_iter_chan_contexts_atomic(ar->hw,
7068						    ath10k_mac_get_any_chandef_iter,
7069						    &def);
7070
7071		if (vifs)
7072			def = &vifs[0].new_ctx->def;
7073
7074		ar->rx_channel = def->chan;
7075	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
7076		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7077		/* During a driver restart due to a firmware assert, mac80211
7078		 * still holds a valid channel context for the given radio, so
7079		 * the channel context iteration returns num_chanctx > 0. Fix up
7080		 * rx_channel while the restart is in progress.
7081		 */
7082		ar->rx_channel = ctx->def.chan;
7083	} else {
7084		ar->rx_channel = NULL;
7085	}
7086	rcu_read_unlock();
7087}
7088
7089static void
7090ath10k_mac_update_vif_chan(struct ath10k *ar,
7091			   struct ieee80211_vif_chanctx_switch *vifs,
7092			   int n_vifs)
7093{
7094	struct ath10k_vif *arvif;
7095	int ret;
7096	int i;
7097
7098	lockdep_assert_held(&ar->conf_mutex);
7099
7100	/* First stop monitor interface. Some FW versions crash if there's a
7101	 * lone monitor interface.
7102	 */
7103	if (ar->monitor_started)
7104		ath10k_monitor_stop(ar);
7105
7106	for (i = 0; i < n_vifs; i++) {
7107		arvif = ath10k_vif_to_arvif(vifs[i].vif);
7108
7109		ath10k_dbg(ar, ATH10K_DBG_MAC,
7110			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7111			   arvif->vdev_id,
7112			   vifs[i].old_ctx->def.chan->center_freq,
7113			   vifs[i].new_ctx->def.chan->center_freq,
7114			   vifs[i].old_ctx->def.width,
7115			   vifs[i].new_ctx->def.width);
7116
7117		if (WARN_ON(!arvif->is_started))
7118			continue;
7119
7120		if (WARN_ON(!arvif->is_up))
7121			continue;
7122
7123		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7124		if (ret) {
7125			ath10k_warn(ar, "failed to down vdev %d: %d\n",
7126				    arvif->vdev_id, ret);
7127			continue;
7128		}
7129	}
7130
7131	/* All relevant vdevs are downed and associated channel resources
7132	 * should be available for the channel switch now.
7133	 */
7134
7135	spin_lock_bh(&ar->data_lock);
7136	ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
7137	spin_unlock_bh(&ar->data_lock);
7138
7139	for (i = 0; i < n_vifs; i++) {
7140		arvif = ath10k_vif_to_arvif(vifs[i].vif);
7141
7142		if (WARN_ON(!arvif->is_started))
7143			continue;
7144
7145		if (WARN_ON(!arvif->is_up))
7146			continue;
7147
7148		ret = ath10k_mac_setup_bcn_tmpl(arvif);
7149		if (ret)
7150			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
7151				    ret);
7152
7153		ret = ath10k_mac_setup_prb_tmpl(arvif);
7154		if (ret)
7155			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7156				    ret);
7157
7158		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7159		if (ret) {
7160			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7161				    arvif->vdev_id, ret);
7162			continue;
7163		}
7164
7165		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7166					 arvif->bssid);
7167		if (ret) {
7168			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7169				    arvif->vdev_id, ret);
7170			continue;
7171		}
7172	}
7173
7174	ath10k_monitor_recalc(ar);
7175}
7176
7177static int
7178ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
7179			  struct ieee80211_chanctx_conf *ctx)
7180{
7181	struct ath10k *ar = hw->priv;
7182
7183	ath10k_dbg(ar, ATH10K_DBG_MAC,
7184		   "mac chanctx add freq %hu width %d ptr %pK\n",
7185		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7186
7187	mutex_lock(&ar->conf_mutex);
7188
7189	spin_lock_bh(&ar->data_lock);
7190	ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
7191	spin_unlock_bh(&ar->data_lock);
7192
7193	ath10k_recalc_radar_detection(ar);
7194	ath10k_monitor_recalc(ar);
7195
7196	mutex_unlock(&ar->conf_mutex);
7197
7198	return 0;
7199}
7200
7201static void
7202ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
7203			     struct ieee80211_chanctx_conf *ctx)
7204{
7205	struct ath10k *ar = hw->priv;
7206
7207	ath10k_dbg(ar, ATH10K_DBG_MAC,
7208		   "mac chanctx remove freq %hu width %d ptr %pK\n",
7209		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7210
7211	mutex_lock(&ar->conf_mutex);
7212
7213	spin_lock_bh(&ar->data_lock);
7214	ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
7215	spin_unlock_bh(&ar->data_lock);
7216
7217	ath10k_recalc_radar_detection(ar);
7218	ath10k_monitor_recalc(ar);
7219
7220	mutex_unlock(&ar->conf_mutex);
7221}
7222
7223struct ath10k_mac_change_chanctx_arg {
7224	struct ieee80211_chanctx_conf *ctx;
7225	struct ieee80211_vif_chanctx_switch *vifs;
7226	int n_vifs;
7227	int next_vif;
7228};
7229
7230static void
7231ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7232				   struct ieee80211_vif *vif)
7233{
7234	struct ath10k_mac_change_chanctx_arg *arg = data;
7235
7236	if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7237		return;
7238
7239	arg->n_vifs++;
7240}
7241
7242static void
7243ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7244				    struct ieee80211_vif *vif)
7245{
7246	struct ath10k_mac_change_chanctx_arg *arg = data;
7247	struct ieee80211_chanctx_conf *ctx;
7248
7249	ctx = rcu_access_pointer(vif->chanctx_conf);
7250	if (ctx != arg->ctx)
7251		return;
7252
7253	if (WARN_ON(arg->next_vif == arg->n_vifs))
7254		return;
7255
7256	arg->vifs[arg->next_vif].vif = vif;
7257	arg->vifs[arg->next_vif].old_ctx = ctx;
7258	arg->vifs[arg->next_vif].new_ctx = ctx;
7259	arg->next_vif++;
7260}
7261
7262static void
7263ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7264			     struct ieee80211_chanctx_conf *ctx,
7265			     u32 changed)
7266{
7267	struct ath10k *ar = hw->priv;
7268	struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
7269
7270	mutex_lock(&ar->conf_mutex);
7271
7272	ath10k_dbg(ar, ATH10K_DBG_MAC,
7273		   "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
7274		   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
7275
7276	/* This shouldn't really happen because channel switching should use
7277	 * switch_vif_chanctx().
7278	 */
7279	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7280		goto unlock;
7281
7282	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7283		ieee80211_iterate_active_interfaces_atomic(
7284					hw,
7285					IEEE80211_IFACE_ITER_NORMAL,
7286					ath10k_mac_change_chanctx_cnt_iter,
7287					&arg);
7288		if (arg.n_vifs == 0)
7289			goto radar;
7290
7291		arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7292				   GFP_KERNEL);
7293		if (!arg.vifs)
7294			goto radar;
7295
7296		ieee80211_iterate_active_interfaces_atomic(
7297					hw,
7298					IEEE80211_IFACE_ITER_NORMAL,
7299					ath10k_mac_change_chanctx_fill_iter,
7300					&arg);
7301		ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7302		kfree(arg.vifs);
7303	}
7304
7305radar:
7306	ath10k_recalc_radar_detection(ar);
7307
7308	/* FIXME: How to configure Rx chains properly? */
7309
7310	/* No other actions are actually necessary. Firmware maintains channel
7311	 * definitions per vdev internally and there's no host-side channel
7312	 * context abstraction to configure, e.g. channel width.
7313	 */
7314
7315unlock:
7316	mutex_unlock(&ar->conf_mutex);
7317}
7318
7319static int
7320ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7321				 struct ieee80211_vif *vif,
7322				 struct ieee80211_chanctx_conf *ctx)
7323{
7324	struct ath10k *ar = hw->priv;
7325	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7326	int ret;
7327
7328	mutex_lock(&ar->conf_mutex);
7329
7330	ath10k_dbg(ar, ATH10K_DBG_MAC,
7331		   "mac chanctx assign ptr %pK vdev_id %i\n",
7332		   ctx, arvif->vdev_id);
7333
7334	if (WARN_ON(arvif->is_started)) {
7335		mutex_unlock(&ar->conf_mutex);
7336		return -EBUSY;
7337	}
7338
7339	ret = ath10k_vdev_start(arvif, &ctx->def);
7340	if (ret) {
7341		ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7342			    arvif->vdev_id, vif->addr,
7343			    ctx->def.chan->center_freq, ret);
7344		goto err;
7345	}
7346
7347	arvif->is_started = true;
7348
7349	ret = ath10k_mac_vif_setup_ps(arvif);
7350	if (ret) {
7351		ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7352			    arvif->vdev_id, ret);
7353		goto err_stop;
7354	}
7355
7356	if (vif->type == NL80211_IFTYPE_MONITOR) {
7357		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7358		if (ret) {
7359			ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7360				    arvif->vdev_id, ret);
7361			goto err_stop;
7362		}
7363
7364		arvif->is_up = true;
7365	}
7366
7367	mutex_unlock(&ar->conf_mutex);
7368	return 0;
7369
7370err_stop:
7371	ath10k_vdev_stop(arvif);
7372	arvif->is_started = false;
7373	ath10k_mac_vif_setup_ps(arvif);
7374
7375err:
7376	mutex_unlock(&ar->conf_mutex);
7377	return ret;
7378}
7379
7380static void
7381ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7382				   struct ieee80211_vif *vif,
7383				   struct ieee80211_chanctx_conf *ctx)
7384{
7385	struct ath10k *ar = hw->priv;
7386	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7387	int ret;
7388
7389	mutex_lock(&ar->conf_mutex);
7390
7391	ath10k_dbg(ar, ATH10K_DBG_MAC,
7392		   "mac chanctx unassign ptr %pK vdev_id %i\n",
7393		   ctx, arvif->vdev_id);
7394
7395	WARN_ON(!arvif->is_started);
7396
7397	if (vif->type == NL80211_IFTYPE_MONITOR) {
7398		WARN_ON(!arvif->is_up);
7399
7400		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7401		if (ret)
7402			ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7403				    arvif->vdev_id, ret);
7404
7405		arvif->is_up = false;
7406	}
7407
7408	ret = ath10k_vdev_stop(arvif);
7409	if (ret)
7410		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7411			    arvif->vdev_id, ret);
7412
7413	arvif->is_started = false;
7414
7415	mutex_unlock(&ar->conf_mutex);
7416}
7417
7418static int
7419ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7420				 struct ieee80211_vif_chanctx_switch *vifs,
7421				 int n_vifs,
7422				 enum ieee80211_chanctx_switch_mode mode)
7423{
7424	struct ath10k *ar = hw->priv;
7425
7426	mutex_lock(&ar->conf_mutex);
7427
7428	ath10k_dbg(ar, ATH10K_DBG_MAC,
7429		   "mac chanctx switch n_vifs %d mode %d\n",
7430		   n_vifs, mode);
7431	ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
7432
7433	mutex_unlock(&ar->conf_mutex);
7434	return 0;
7435}
7436
7437static const struct ieee80211_ops ath10k_ops = {
7438	.tx				= ath10k_mac_op_tx,
7439	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
7440	.start				= ath10k_start,
7441	.stop				= ath10k_stop,
7442	.config				= ath10k_config,
7443	.add_interface			= ath10k_add_interface,
7444	.remove_interface		= ath10k_remove_interface,
7445	.configure_filter		= ath10k_configure_filter,
7446	.bss_info_changed		= ath10k_bss_info_changed,
7447	.set_coverage_class		= ath10k_mac_op_set_coverage_class,
7448	.hw_scan			= ath10k_hw_scan,
7449	.cancel_hw_scan			= ath10k_cancel_hw_scan,
7450	.set_key			= ath10k_set_key,
7451	.set_default_unicast_key        = ath10k_set_default_unicast_key,
7452	.sta_state			= ath10k_sta_state,
7453	.conf_tx			= ath10k_conf_tx,
7454	.remain_on_channel		= ath10k_remain_on_channel,
7455	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
7456	.set_rts_threshold		= ath10k_set_rts_threshold,
7457	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
7458	.flush				= ath10k_flush,
7459	.tx_last_beacon			= ath10k_tx_last_beacon,
7460	.set_antenna			= ath10k_set_antenna,
7461	.get_antenna			= ath10k_get_antenna,
7462	.reconfig_complete		= ath10k_reconfig_complete,
7463	.get_survey			= ath10k_get_survey,
7464	.set_bitrate_mask		= ath10k_mac_op_set_bitrate_mask,
7465	.sta_rc_update			= ath10k_sta_rc_update,
7466	.offset_tsf			= ath10k_offset_tsf,
7467	.ampdu_action			= ath10k_ampdu_action,
7468	.get_et_sset_count		= ath10k_debug_get_et_sset_count,
7469	.get_et_stats			= ath10k_debug_get_et_stats,
7470	.get_et_strings			= ath10k_debug_get_et_strings,
7471	.add_chanctx			= ath10k_mac_op_add_chanctx,
7472	.remove_chanctx			= ath10k_mac_op_remove_chanctx,
7473	.change_chanctx			= ath10k_mac_op_change_chanctx,
7474	.assign_vif_chanctx		= ath10k_mac_op_assign_vif_chanctx,
7475	.unassign_vif_chanctx		= ath10k_mac_op_unassign_vif_chanctx,
7476	.switch_vif_chanctx		= ath10k_mac_op_switch_vif_chanctx,
7477
7478	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7479
7480#ifdef CONFIG_PM
7481	.suspend			= ath10k_wow_op_suspend,
7482	.resume				= ath10k_wow_op_resume,
7483#endif
7484#ifdef CONFIG_MAC80211_DEBUGFS
7485	.sta_add_debugfs		= ath10k_sta_add_debugfs,
7486	.sta_statistics			= ath10k_sta_statistics,
7487#endif
7488};
7489
7490#define CHAN2G(_channel, _freq, _flags) { \
7491	.band			= NL80211_BAND_2GHZ, \
7492	.hw_value		= (_channel), \
7493	.center_freq		= (_freq), \
7494	.flags			= (_flags), \
7495	.max_antenna_gain	= 0, \
7496	.max_power		= 30, \
7497}
7498
7499#define CHAN5G(_channel, _freq, _flags) { \
7500	.band			= NL80211_BAND_5GHZ, \
7501	.hw_value		= (_channel), \
7502	.center_freq		= (_freq), \
7503	.flags			= (_flags), \
7504	.max_antenna_gain	= 0, \
7505	.max_power		= 30, \
7506}
7507
7508static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7509	CHAN2G(1, 2412, 0),
7510	CHAN2G(2, 2417, 0),
7511	CHAN2G(3, 2422, 0),
7512	CHAN2G(4, 2427, 0),
7513	CHAN2G(5, 2432, 0),
7514	CHAN2G(6, 2437, 0),
7515	CHAN2G(7, 2442, 0),
7516	CHAN2G(8, 2447, 0),
7517	CHAN2G(9, 2452, 0),
7518	CHAN2G(10, 2457, 0),
7519	CHAN2G(11, 2462, 0),
7520	CHAN2G(12, 2467, 0),
7521	CHAN2G(13, 2472, 0),
7522	CHAN2G(14, 2484, 0),
7523};
7524
7525static const struct ieee80211_channel ath10k_5ghz_channels[] = {
7526	CHAN5G(36, 5180, 0),
7527	CHAN5G(40, 5200, 0),
7528	CHAN5G(44, 5220, 0),
7529	CHAN5G(48, 5240, 0),
7530	CHAN5G(52, 5260, 0),
7531	CHAN5G(56, 5280, 0),
7532	CHAN5G(60, 5300, 0),
7533	CHAN5G(64, 5320, 0),
7534	CHAN5G(100, 5500, 0),
7535	CHAN5G(104, 5520, 0),
7536	CHAN5G(108, 5540, 0),
7537	CHAN5G(112, 5560, 0),
7538	CHAN5G(116, 5580, 0),
7539	CHAN5G(120, 5600, 0),
7540	CHAN5G(124, 5620, 0),
7541	CHAN5G(128, 5640, 0),
7542	CHAN5G(132, 5660, 0),
7543	CHAN5G(136, 5680, 0),
7544	CHAN5G(140, 5700, 0),
7545	CHAN5G(144, 5720, 0),
7546	CHAN5G(149, 5745, 0),
7547	CHAN5G(153, 5765, 0),
7548	CHAN5G(157, 5785, 0),
7549	CHAN5G(161, 5805, 0),
7550	CHAN5G(165, 5825, 0),
7551};
7552
7553struct ath10k *ath10k_mac_create(size_t priv_size)
7554{
7555	struct ieee80211_hw *hw;
7556	struct ieee80211_ops *ops;
7557	struct ath10k *ar;
7558
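	/* Duplicate the ops template so each device gets its own copy, which
	 * presumably allows per-device adjustments later without touching the
	 * shared ath10k_ops table.
	 */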
7559	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
7560	if (!ops)
7561		return NULL;
7562
7563	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
7564	if (!hw) {
7565		kfree(ops);
7566		return NULL;
7567	}
7568
7569	ar = hw->priv;
7570	ar->hw = hw;
7571	ar->ops = ops;
7572
7573	return ar;
7574}
7575
7576void ath10k_mac_destroy(struct ath10k *ar)
7577{
7578	struct ieee80211_ops *ops = ar->ops;
7579
7580	ieee80211_free_hw(ar->hw);
7581	kfree(ops);
7582}
7583
7584static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7585	{
7586		.max	= 8,
7587		.types	= BIT(NL80211_IFTYPE_STATION)
7588			| BIT(NL80211_IFTYPE_P2P_CLIENT)
7589	},
7590	{
7591		.max	= 3,
7592		.types	= BIT(NL80211_IFTYPE_P2P_GO)
7593	},
7594	{
7595		.max	= 1,
7596		.types	= BIT(NL80211_IFTYPE_P2P_DEVICE)
7597	},
7598	{
7599		.max	= 7,
7600		.types	= BIT(NL80211_IFTYPE_AP)
7601#ifdef CONFIG_MAC80211_MESH
7602			| BIT(NL80211_IFTYPE_MESH_POINT)
7603#endif
7604	},
7605};
7606
7607static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
7608	{
7609		.max	= 8,
7610		.types	= BIT(NL80211_IFTYPE_AP)
7611#ifdef CONFIG_MAC80211_MESH
7612			| BIT(NL80211_IFTYPE_MESH_POINT)
7613#endif
7614	},
7615	{
7616		.max	= 1,
7617		.types	= BIT(NL80211_IFTYPE_STATION)
7618	},
7619};
7620
7621static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7622	{
7623		.limits = ath10k_if_limits,
7624		.n_limits = ARRAY_SIZE(ath10k_if_limits),
7625		.max_interfaces = 8,
7626		.num_different_channels = 1,
7627		.beacon_int_infra_match = true,
7628	},
7629};
7630
7631static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
7632	{
7633		.limits = ath10k_10x_if_limits,
7634		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7635		.max_interfaces = 8,
7636		.num_different_channels = 1,
7637		.beacon_int_infra_match = true,
7638#ifdef CONFIG_ATH10K_DFS_CERTIFIED
7639		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7640					BIT(NL80211_CHAN_WIDTH_20) |
7641					BIT(NL80211_CHAN_WIDTH_40) |
7642					BIT(NL80211_CHAN_WIDTH_80),
7643#endif
7644	},
7645};
7646
7647static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7648	{
7649		.max = 2,
7650		.types = BIT(NL80211_IFTYPE_STATION),
7651	},
7652	{
7653		.max = 2,
7654		.types = BIT(NL80211_IFTYPE_AP) |
7655#ifdef CONFIG_MAC80211_MESH
7656			 BIT(NL80211_IFTYPE_MESH_POINT) |
7657#endif
7658			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7659			 BIT(NL80211_IFTYPE_P2P_GO),
7660	},
7661	{
7662		.max = 1,
7663		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7664	},
7665};
7666
7667static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7668	{
7669		.max = 2,
7670		.types = BIT(NL80211_IFTYPE_STATION),
7671	},
7672	{
7673		.max = 2,
7674		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7675	},
7676	{
7677		.max = 1,
7678		.types = BIT(NL80211_IFTYPE_AP) |
7679#ifdef CONFIG_MAC80211_MESH
7680			 BIT(NL80211_IFTYPE_MESH_POINT) |
7681#endif
7682			 BIT(NL80211_IFTYPE_P2P_GO),
7683	},
7684	{
7685		.max = 1,
7686		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7687	},
7688};
7689
7690static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7691	{
7692		.max = 1,
7693		.types = BIT(NL80211_IFTYPE_STATION),
7694	},
7695	{
7696		.max = 1,
7697		.types = BIT(NL80211_IFTYPE_ADHOC),
7698	},
7699};
7700
7701/* FIXME: This is not thoroughly tested. These combinations may over- or
7702 * underestimate hw/fw capabilities.
7703 */
7704static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7705	{
7706		.limits = ath10k_tlv_if_limit,
7707		.num_different_channels = 1,
7708		.max_interfaces = 4,
7709		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7710	},
7711	{
7712		.limits = ath10k_tlv_if_limit_ibss,
7713		.num_different_channels = 1,
7714		.max_interfaces = 2,
7715		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7716	},
7717};
7718
7719static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7720	{
7721		.limits = ath10k_tlv_if_limit,
7722		.num_different_channels = 1,
7723		.max_interfaces = 4,
7724		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7725	},
7726	{
7727		.limits = ath10k_tlv_qcs_if_limit,
7728		.num_different_channels = 2,
7729		.max_interfaces = 4,
7730		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
7731	},
7732	{
7733		.limits = ath10k_tlv_if_limit_ibss,
7734		.num_different_channels = 1,
7735		.max_interfaces = 2,
7736		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7737	},
7738};
7739
7740static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
7741	{
7742		.max = 1,
7743		.types = BIT(NL80211_IFTYPE_STATION),
7744	},
7745	{
7746		.max	= 16,
7747		.types	= BIT(NL80211_IFTYPE_AP)
7748#ifdef CONFIG_MAC80211_MESH
7749			| BIT(NL80211_IFTYPE_MESH_POINT)
7750#endif
7751	},
7752};
7753
7754static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
7755	{
7756		.limits = ath10k_10_4_if_limits,
7757		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
7758		.max_interfaces = 16,
7759		.num_different_channels = 1,
7760		.beacon_int_infra_match = true,
7761#ifdef CONFIG_ATH10K_DFS_CERTIFIED
7762		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7763					BIT(NL80211_CHAN_WIDTH_20) |
7764					BIT(NL80211_CHAN_WIDTH_40) |
7765					BIT(NL80211_CHAN_WIDTH_80),
7766#endif
7767	},
7768};
7769
7770static void ath10k_get_arvif_iter(void *data, u8 *mac,
7771				  struct ieee80211_vif *vif)
7772{
7773	struct ath10k_vif_iter *arvif_iter = data;
7774	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7775
7776	if (arvif->vdev_id == arvif_iter->vdev_id)
7777		arvif_iter->arvif = arvif;
7778}
7779
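/* Look up the ath10k_vif that owns the given vdev_id by walking the active
 * mac80211 interfaces atomically; returns NULL (and warns) if none matches.
 */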
7780struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
7781{
7782	struct ath10k_vif_iter arvif_iter;
7783	u32 flags;
7784
7785	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
7786	arvif_iter.vdev_id = vdev_id;
7787
7788	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
7789	ieee80211_iterate_active_interfaces_atomic(ar->hw,
7790						   flags,
7791						   ath10k_get_arvif_iter,
7792						   &arvif_iter);
7793	if (!arvif_iter.arvif) {
7794		ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
7795		return NULL;
7796	}
7797
7798	return arvif_iter.arvif;
7799}
7800
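/* Some platforms expose regulatory information through the ACPI "WRDD"
 * method; the package entry whose domain type equals WRDD_WIFI (0x07)
 * carries the value used for Wi-Fi.
 */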
7801#define WRD_METHOD "WRDD"
7802#define WRDD_WIFI  (0x07)
7803
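/* Parse the package returned by the WRDD method and return the value of the
 * first Wi-Fi domain entry, or 0 if the package is malformed or no Wi-Fi
 * entry is present. The caller interprets the value as two packed ASCII
 * alpha2 country-code characters.
 */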
7804static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
7805{
7806	union acpi_object *mcc_pkg;
7807	union acpi_object *domain_type;
7808	union acpi_object *mcc_value;
7809	u32 i;
7810
7811	if (wrdd->type != ACPI_TYPE_PACKAGE ||
7812	    wrdd->package.count < 2 ||
7813	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
7814	    wrdd->package.elements[0].integer.value != 0) {
7815		ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
7816		return 0;
7817	}
7818
7819	for (i = 1; i < wrdd->package.count; ++i) {
7820		mcc_pkg = &wrdd->package.elements[i];
7821
7822		if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
7823			continue;
7824		if (mcc_pkg->package.count < 2)
7825			continue;
7826		if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
7827		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
7828			continue;
7829
7830		domain_type = &mcc_pkg->package.elements[0];
7831		if (domain_type->integer.value != WRDD_WIFI)
7832			continue;
7833
7834		mcc_value = &mcc_pkg->package.elements[1];
7835		return mcc_value->integer.value;
7836	}
7837	return 0;
7838}
7839
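/* Evaluate the ACPI WRDD method for this device, decode the alpha2 country
 * code it returns and translate it into an ath regulatory domain code with
 * COUNTRY_ERD_FLAG set. Returns -EOPNOTSUPP when there is no ACPI handle and
 * -EIO on any other failure.
 */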
7840static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
7841{
7842	struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
7843	acpi_handle root_handle;
7844	acpi_handle handle;
7845	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
7846	acpi_status status;
7847	u32 alpha2_code;
7848	char alpha2[3];
7849
7850	root_handle = ACPI_HANDLE(&pdev->dev);
7851	if (!root_handle)
7852		return -EOPNOTSUPP;
7853
7854	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
7855	if (ACPI_FAILURE(status)) {
7856		ath10k_dbg(ar, ATH10K_DBG_BOOT,
7857			   "failed to get wrd method %d\n", status);
7858		return -EIO;
7859	}
7860
7861	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
7862	if (ACPI_FAILURE(status)) {
7863		ath10k_dbg(ar, ATH10K_DBG_BOOT,
7864			   "failed to call wrdd %d\n", status);
7865		return -EIO;
7866	}
7867
7868	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
7869	kfree(wrdd.pointer);
7870	if (!alpha2_code)
7871		return -EIO;
7872
7873	alpha2[0] = (alpha2_code >> 8) & 0xff;
7874	alpha2[1] = (alpha2_code >> 0) & 0xff;
7875	alpha2[2] = '\0';
7876
7877	ath10k_dbg(ar, ATH10K_DBG_BOOT,
7878		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);
7879
7880	*rd = ath_regd_find_country_by_name(alpha2);
7881	if (*rd == 0xffff)
7882		return -EIO;
7883
7884	*rd |= COUNTRY_ERD_FLAG;
7885	return 0;
7886}
7887
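/* Pick the initial regulatory domain: prefer the ACPI WRDD hint and fall
 * back to the EEPROM-programmed value when WRDD is absent or unusable.
 */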
7888static int ath10k_mac_init_rd(struct ath10k *ar)
7889{
7890	int ret;
7891	u16 rd;
7892
7893	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
7894	if (ret) {
7895		ath10k_dbg(ar, ATH10K_DBG_BOOT,
7896			   "falling back to eeprom programmed regulatory settings\n");
7897		rd = ar->hw_eeprom_rd;
7898	}
7899
7900	ar->ath_common.regulatory.current_rd = rd;
7901	return 0;
7902}
7903
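/* Register the device with mac80211: populate the supported bands and
 * HT/VHT capabilities, advertise interface modes, combinations, hw flags and
 * cipher suites, set up DFS and regulatory state, and finally call
 * ieee80211_register_hw().
 */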
7904int ath10k_mac_register(struct ath10k *ar)
7905{
7906	static const u32 cipher_suites[] = {
7907		WLAN_CIPHER_SUITE_WEP40,
7908		WLAN_CIPHER_SUITE_WEP104,
7909		WLAN_CIPHER_SUITE_TKIP,
7910		WLAN_CIPHER_SUITE_CCMP,
7911		WLAN_CIPHER_SUITE_AES_CMAC,
7912	};
7913	struct ieee80211_supported_band *band;
7914	void *channels;
7915	int ret;
7916
7917	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
7918
7919	SET_IEEE80211_DEV(ar->hw, ar->dev);
7920
7921	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
7922		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
7923		     ATH10K_NUM_CHANS);
7924
7925	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
7926		channels = kmemdup(ath10k_2ghz_channels,
7927				   sizeof(ath10k_2ghz_channels),
7928				   GFP_KERNEL);
7929		if (!channels) {
7930			ret = -ENOMEM;
7931			goto err_free;
7932		}
7933
7934		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
7935		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7936		band->channels = channels;
7937
7938		if (ar->hw_params.cck_rate_map_rev2) {
7939			band->n_bitrates = ath10k_g_rates_rev2_size;
7940			band->bitrates = ath10k_g_rates_rev2;
7941		} else {
7942			band->n_bitrates = ath10k_g_rates_size;
7943			band->bitrates = ath10k_g_rates;
7944		}
7945
7946		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
7947	}
7948
7949	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
7950		channels = kmemdup(ath10k_5ghz_channels,
7951				   sizeof(ath10k_5ghz_channels),
7952				   GFP_KERNEL);
7953		if (!channels) {
7954			ret = -ENOMEM;
7955			goto err_free;
7956		}
7957
7958		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
7959		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
7960		band->channels = channels;
7961		band->n_bitrates = ath10k_a_rates_size;
7962		band->bitrates = ath10k_a_rates;
7963		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
7964	}
7965
7966	ath10k_mac_setup_ht_vht_cap(ar);
7967
7968	ar->hw->wiphy->interface_modes =
7969		BIT(NL80211_IFTYPE_STATION) |
7970		BIT(NL80211_IFTYPE_AP) |
7971		BIT(NL80211_IFTYPE_MESH_POINT);
7972
7973	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
7974	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
7975
7976	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
7977		ar->hw->wiphy->interface_modes |=
7978			BIT(NL80211_IFTYPE_P2P_DEVICE) |
7979			BIT(NL80211_IFTYPE_P2P_CLIENT) |
7980			BIT(NL80211_IFTYPE_P2P_GO);
7981
7982	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
7983	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
7984	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
7985	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
7986	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
7987	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
7988	ieee80211_hw_set(ar->hw, AP_LINK_PS);
7989	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
7990	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
7991	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
7992	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
7993	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
7994	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
7995	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
7996	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
7997	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
7998
7999	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
8000		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
8001
8002	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
8003	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
8004
8005	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
8006		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
8007
8008	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
8009		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
8010		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
8011	}
8012
8013	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
8014	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
8015
8016	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
8017	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
8018	ar->hw->txq_data_size = sizeof(struct ath10k_txq);
8019
8020	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
8021
8022	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
8023		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
8024
8025		/* Firmware delivers WPS/P2P Probe Request frames to the driver so that
8026		 * userspace (e.g. wpa_supplicant/hostapd) can generate the correct
8027		 * Probe Responses; advertising this offload is thus a bit of a hack.
8028		 */
8029		ar->hw->wiphy->probe_resp_offload |=
8030			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
8031			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
8032			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
8033	}
8034
8035	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
8036		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
8037
8038	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
8039	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
8040	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
8041
8042	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
8043	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
8044				   NL80211_FEATURE_AP_SCAN;
8045
8046	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
8047
8048	ret = ath10k_wow_init(ar);
8049	if (ret) {
8050		ath10k_warn(ar, "failed to init wow: %d\n", ret);
8051		goto err_free;
8052	}
8053
8054	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
8055
8056	/*
8057	 * On LL hardware the queues are managed entirely by the firmware,
8058	 * so simply advertise the maximum number of queues to mac80211.
8059	 */
8060	ar->hw->queues = IEEE80211_MAX_QUEUES;
8061
8062	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
8063	 * something that vdev_ids can't reach so that we don't stop the queue
8064	 * accidentally.
8065	 */
8066	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
8067
8068	switch (ar->running_fw->fw_file.wmi_op_version) {
8069	case ATH10K_FW_WMI_OP_VERSION_MAIN:
8070		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
8071		ar->hw->wiphy->n_iface_combinations =
8072			ARRAY_SIZE(ath10k_if_comb);
8073		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
8074		break;
8075	case ATH10K_FW_WMI_OP_VERSION_TLV:
8076		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
8077			ar->hw->wiphy->iface_combinations =
8078				ath10k_tlv_qcs_if_comb;
8079			ar->hw->wiphy->n_iface_combinations =
8080				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
8081		} else {
8082			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
8083			ar->hw->wiphy->n_iface_combinations =
8084				ARRAY_SIZE(ath10k_tlv_if_comb);
8085		}
8086		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
8087		break;
8088	case ATH10K_FW_WMI_OP_VERSION_10_1:
8089	case ATH10K_FW_WMI_OP_VERSION_10_2:
8090	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
8091		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
8092		ar->hw->wiphy->n_iface_combinations =
8093			ARRAY_SIZE(ath10k_10x_if_comb);
8094		break;
8095	case ATH10K_FW_WMI_OP_VERSION_10_4:
8096		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
8097		ar->hw->wiphy->n_iface_combinations =
8098			ARRAY_SIZE(ath10k_10_4_if_comb);
8099		break;
8100	case ATH10K_FW_WMI_OP_VERSION_UNSET:
8101	case ATH10K_FW_WMI_OP_VERSION_MAX:
8102		WARN_ON(1);
8103		ret = -EINVAL;
8104		goto err_free;
8105	}
8106
8107	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
8108		ar->hw->netdev_features = NETIF_F_HW_CSUM;
8109
8110	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
8111		/* Init ath dfs pattern detector */
8112		ar->ath_common.debug_mask = ATH_DBG_DFS;
8113		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
8114							     NL80211_DFS_UNSET);
8115
8116		if (!ar->dfs_detector)
8117			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
8118	}
8119
8120	/* Current wake_tx_queue implementation imposes a significant
8121	 * performance penalty in some setups. The tx scheduling code needs
8122	 * more work anyway so disable the wake_tx_queue unless firmware
8123	 * supports the pull-push mechanism.
8124	 */
8125	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
8126		      ar->running_fw->fw_file.fw_features))
8127		ar->ops->wake_tx_queue = NULL;
8128
8129	ret = ath10k_mac_init_rd(ar);
8130	if (ret) {
8131		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
8132		goto err_dfs_detector_exit;
8133	}
8134
8135	/* Disable set_coverage_class for chipsets that do not support it. */
8136	if (!ar->hw_params.hw_ops->set_coverage_class)
8137		ar->ops->set_coverage_class = NULL;
8138
8139	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
8140			    ath10k_reg_notifier);
8141	if (ret) {
8142		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
8143		goto err_dfs_detector_exit;
8144	}
8145
8146	ar->hw->wiphy->cipher_suites = cipher_suites;
8147	ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
8148
8149	ret = ieee80211_register_hw(ar->hw);
8150	if (ret) {
8151		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
8152		goto err_dfs_detector_exit;
8153	}
8154
8155	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
8156		ret = regulatory_hint(ar->hw->wiphy,
8157				      ar->ath_common.regulatory.alpha2);
8158		if (ret)
8159			goto err_unregister;
8160	}
8161
8162	return 0;
8163
8164err_unregister:
8165	ieee80211_unregister_hw(ar->hw);
8166
8167err_dfs_detector_exit:
8168	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8169		ar->dfs_detector->exit(ar->dfs_detector);
8170
8171err_free:
8172	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8173	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8174
8175	SET_IEEE80211_DEV(ar->hw, NULL);
8176	return ret;
8177}
8178
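/* Undo ath10k_mac_register(): unregister from mac80211, tear down the DFS
 * pattern detector and release the channel arrays duplicated at register
 * time.
 */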
8179void ath10k_mac_unregister(struct ath10k *ar)
8180{
8181	ieee80211_unregister_hw(ar->hw);
8182
8183	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8184		ar->dfs_detector->exit(ar->dfs_detector);
8185
8186	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8187	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8188
8189	SET_IEEE80211_DEV(ar->hw, NULL);
8190}